// SPDX-License-Identifier: GPL-2.0
/*
 * PCI Bus Services, see include/linux/pci.h for further explanation.
 *
 * Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
 * David Mosberger-Tang
 *
 * Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz>
 */

#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/dmi.h>
#include <linux/init.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/log2.h>
#include <linux/logic_pio.h>
#include <linux/pm_wakeup.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <linux/pci_hotplug.h>
#include <linux/vmalloc.h>
#include <asm/dma.h>
#include <linux/aer.h>
#include <linux/bitfield.h>
#include "pci.h"

DEFINE_MUTEX(pci_slot_mutex);

const char *pci_power_names[] = {
	"error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown",
};
EXPORT_SYMBOL_GPL(pci_power_names);

#ifdef CONFIG_X86_32
int isa_dma_bridge_buggy;
EXPORT_SYMBOL(isa_dma_bridge_buggy);
#endif

int pci_pci_problems;
EXPORT_SYMBOL(pci_pci_problems);

unsigned int pci_pm_d3hot_delay;

static void pci_pme_list_scan(struct work_struct *work);

static LIST_HEAD(pci_pme_list);
static DEFINE_MUTEX(pci_pme_list_mutex);
static DECLARE_DELAYED_WORK(pci_pme_work, pci_pme_list_scan);

struct pci_pme_device {
	struct list_head list;
	struct pci_dev *dev;
};

#define PME_TIMEOUT 1000 /* msec, how long between PME checks */

/*
 * Following exit from Conventional Reset, devices must be ready within 1 sec
 * (PCIe r6.0 sec 6.6.1).  A D3cold to D0 transition implies a Conventional
 * Reset (PCIe r6.0 sec 5.8).
 */
#define PCI_RESET_WAIT 1000 /* msec */

/*
 * Devices may extend the 1 sec period through Request Retry Status
 * completions (PCIe r6.0 sec 2.3.1).  The spec does not provide an upper
 * limit, but 60 sec ought to be enough for any device to become
 * responsive.
 */
#define PCIE_RESET_READY_POLL_MS 60000 /* msec */

static void pci_dev_d3_sleep(struct pci_dev *dev)
{
	unsigned int delay_ms = max(dev->d3hot_delay, pci_pm_d3hot_delay);
	unsigned int upper;

	if (delay_ms) {
		/* Use a 20% upper bound, 1ms minimum */
		upper = max(DIV_ROUND_CLOSEST(delay_ms, 5), 1U);
		usleep_range(delay_ms * USEC_PER_MSEC,
			     (delay_ms + upper) * USEC_PER_MSEC);
	}
}

bool pci_reset_supported(struct pci_dev *dev)
{
	return dev->reset_methods[0] != 0;
}

#ifdef CONFIG_PCI_DOMAINS
int pci_domains_supported = 1;
#endif

#define DEFAULT_CARDBUS_IO_SIZE		(256)
#define DEFAULT_CARDBUS_MEM_SIZE	(64*1024*1024)
/* pci=cbmemsize=nnM,cbiosize=nn can override this */
unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE;
unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;

#define DEFAULT_HOTPLUG_IO_SIZE		(256)
#define DEFAULT_HOTPLUG_MMIO_SIZE	(2*1024*1024)
#define DEFAULT_HOTPLUG_MMIO_PREF_SIZE	(2*1024*1024)
/* hpiosize=nn can override this */
unsigned long pci_hotplug_io_size  = DEFAULT_HOTPLUG_IO_SIZE;
/*
 * pci=hpmmiosize=nnM overrides non-prefetchable MMIO size,
 * pci=hpmmioprefsize=nnM overrides prefetchable MMIO size;
 * pci=hpmemsize=nnM overrides both
 */
unsigned long pci_hotplug_mmio_size = DEFAULT_HOTPLUG_MMIO_SIZE;
unsigned long pci_hotplug_mmio_pref_size = DEFAULT_HOTPLUG_MMIO_PREF_SIZE;

#define DEFAULT_HOTPLUG_BUS_SIZE	1
unsigned long pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE;


/* PCIe MPS/MRRS strategy; can be overridden by kernel command-line param */
#ifdef CONFIG_PCIE_BUS_TUNE_OFF
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_TUNE_OFF;
#elif defined CONFIG_PCIE_BUS_SAFE
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_SAFE;
#elif defined CONFIG_PCIE_BUS_PERFORMANCE
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_PERFORMANCE;
#elif defined CONFIG_PCIE_BUS_PEER2PEER
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_PEER2PEER;
#else
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_DEFAULT;
#endif

/*
 * The default CLS is used if arch didn't set CLS explicitly and not
 * all PCI devices agree on the same value.  Arch can override either
 * the default or the actual value as it sees fit.  Don't forget this is
 * measured in 32-bit words, not bytes.
 */
u8 pci_dfl_cache_line_size = L1_CACHE_BYTES >> 2;
u8 pci_cache_line_size;

/*
 * If we set up a device for bus mastering, we need to check the latency
 * timer as certain BIOSes forget to set it properly.
 */
unsigned int pcibios_max_latency = 255;

/* If set, the PCIe ARI capability will not be used. */
static bool pcie_ari_disabled;

/* If set, the PCIe ATS capability will not be used. */
static bool pcie_ats_disabled;

/* If set, the PCI config space of each device is printed during boot. */
bool pci_early_dump;

bool pci_ats_disabled(void)
{
	return pcie_ats_disabled;
}
EXPORT_SYMBOL_GPL(pci_ats_disabled);

/* Disable bridge_d3 for all PCIe ports */
static bool pci_bridge_d3_disable;
/* Force bridge_d3 for all PCIe ports */
static bool pci_bridge_d3_force;

static int __init pcie_port_pm_setup(char *str)
{
	if (!strcmp(str, "off"))
		pci_bridge_d3_disable = true;
	else if (!strcmp(str, "force"))
		pci_bridge_d3_force = true;
	return 1;
}
__setup("pcie_port_pm=", pcie_port_pm_setup);
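
/*
 * Usage sketch: pcie_port_pm= is a kernel command-line parameter, so the
 * policy above is selected at boot time, e.g.:
 *
 *   pcie_port_pm=off      never put PCIe ports into D3
 *   pcie_port_pm=force    put PCIe ports into D3 even where the default
 *                         policy would not
 */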

/**
 * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
 * @bus: pointer to PCI bus structure to search
 *
 * Given a PCI bus, returns the highest PCI bus number present in the set
 * including the given PCI bus and its list of child PCI buses.
 */
unsigned char pci_bus_max_busnr(struct pci_bus *bus)
{
	struct pci_bus *tmp;
	unsigned char max, n;

	max = bus->busn_res.end;
	list_for_each_entry(tmp, &bus->children, node) {
		n = pci_bus_max_busnr(tmp);
		if (n > max)
			max = n;
	}
	return max;
}
EXPORT_SYMBOL_GPL(pci_bus_max_busnr);
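
/*
 * Usage sketch (hypothetical caller): find the highest bus number that
 * config accesses behind this device's bus could address.
 *
 *	unsigned char busmax = pci_bus_max_busnr(dev->bus);
 *
 *	// bus numbers above busmax are not part of this subtree
 */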

/**
 * pci_status_get_and_clear_errors - return and clear error bits in PCI_STATUS
 * @pdev: the PCI device
 *
 * Returns error bits set in PCI_STATUS and clears them.
 */
int pci_status_get_and_clear_errors(struct pci_dev *pdev)
{
	u16 status;
	int ret;

	ret = pci_read_config_word(pdev, PCI_STATUS, &status);
	if (ret != PCIBIOS_SUCCESSFUL)
		return -EIO;

	status &= PCI_STATUS_ERROR_BITS;
	if (status)
		pci_write_config_word(pdev, PCI_STATUS, status);

	return status;
}
EXPORT_SYMBOL_GPL(pci_status_get_and_clear_errors);
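
/*
 * Usage sketch (hypothetical error handler, assuming a struct pci_dev
 * *pdev): check whether a failed transfer left a master abort behind.
 *
 *	int status = pci_status_get_and_clear_errors(pdev);
 *
 *	if (status < 0)
 *		return status;		// config space read failed
 *	if (status & PCI_STATUS_REC_MASTER_ABORT)
 *		pci_warn(pdev, "master abort detected\n");
 */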

#ifdef CONFIG_HAS_IOMEM
static void __iomem *__pci_ioremap_resource(struct pci_dev *pdev, int bar,
					    bool write_combine)
{
	struct resource *res = &pdev->resource[bar];
	resource_size_t start = res->start;
	resource_size_t size = resource_size(res);

	/*
	 * Make sure the BAR is actually a memory resource, not an IO resource
	 */
	if (res->flags & IORESOURCE_UNSET || !(res->flags & IORESOURCE_MEM)) {
		pci_err(pdev, "can't ioremap BAR %d: %pR\n", bar, res);
		return NULL;
	}

	if (write_combine)
		return ioremap_wc(start, size);

	return ioremap(start, size);
}

void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
{
	return __pci_ioremap_resource(pdev, bar, false);
}
EXPORT_SYMBOL_GPL(pci_ioremap_bar);

void __iomem *pci_ioremap_wc_bar(struct pci_dev *pdev, int bar)
{
	return __pci_ioremap_resource(pdev, bar, true);
}
EXPORT_SYMBOL_GPL(pci_ioremap_wc_bar);
#endif
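
/*
 * Usage sketch (hypothetical probe path): map BAR 0 into the kernel
 * address space; the helper fails for unassigned or I/O port BARs.
 *
 *	void __iomem *regs = pci_ioremap_bar(pdev, 0);
 *
 *	if (!regs)
 *		return -ENOMEM;
 *	val = readl(regs + REG_OFFSET);	// REG_OFFSET is a made-up offset
 */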

/**
 * pci_dev_str_match_path - test if a path string matches a device
 * @dev: the PCI device to test
 * @path: string to match the device against
 * @endptr: pointer to the string after the match
 *
 * Test if a string (typically from a kernel parameter) formatted as a
 * path of device/function addresses matches a PCI device. The string must
 * be of the form:
 *
 *   [<domain>:]<bus>:<device>.<func>[/<device>.<func>]*
 *
 * A path for a device can be obtained using 'lspci -t'.  Using a path
 * is more robust against bus renumbering than using only a single bus,
 * device and function address.
 *
 * Returns 1 if the string matches the device, 0 if it does not and
 * a negative error code if it fails to parse the string.
 */
static int pci_dev_str_match_path(struct pci_dev *dev, const char *path,
				  const char **endptr)
{
	int ret;
	unsigned int seg, bus, slot, func;
	char *wpath, *p;
	char end;

	*endptr = strchrnul(path, ';');

	wpath = kmemdup_nul(path, *endptr - path, GFP_ATOMIC);
	if (!wpath)
		return -ENOMEM;

	while (1) {
		p = strrchr(wpath, '/');
		if (!p)
			break;
		ret = sscanf(p, "/%x.%x%c", &slot, &func, &end);
		if (ret != 2) {
			ret = -EINVAL;
			goto free_and_exit;
		}

		if (dev->devfn != PCI_DEVFN(slot, func)) {
			ret = 0;
			goto free_and_exit;
		}

		/*
		 * Note: we don't need to get a reference to the upstream
		 * bridge because we hold a reference to the top level
		 * device which should hold a reference to the bridge,
		 * and so on.
		 */
		dev = pci_upstream_bridge(dev);
		if (!dev) {
			ret = 0;
			goto free_and_exit;
		}

		*p = 0;
	}

	ret = sscanf(wpath, "%x:%x:%x.%x%c", &seg, &bus, &slot,
		     &func, &end);
	if (ret != 4) {
		seg = 0;
		ret = sscanf(wpath, "%x:%x.%x%c", &bus, &slot, &func, &end);
		if (ret != 3) {
			ret = -EINVAL;
			goto free_and_exit;
		}
	}

	ret = (seg == pci_domain_nr(dev->bus) &&
	       bus == dev->bus->number &&
	       dev->devfn == PCI_DEVFN(slot, func));

free_and_exit:
	kfree(wpath);
	return ret;
}

/**
 * pci_dev_str_match - test if a string matches a device
 * @dev: the PCI device to test
 * @p: string to match the device against
 * @endptr: pointer to the string after the match
 *
 * Test if a string (typically from a kernel parameter) matches a specified
 * PCI device. The string may be of one of the following formats:
 *
 *   [<domain>:]<bus>:<device>.<func>[/<device>.<func>]*
 *   pci:<vendor>:<device>[:<subvendor>:<subdevice>]
 *
 * The first format specifies a PCI bus/device/function address which
 * may change if new hardware is inserted, if motherboard firmware changes,
 * or due to changes caused by kernel parameters. If the domain is
 * left unspecified, it is taken to be 0.  In order to be robust against
 * bus renumbering issues, a path of PCI device/function numbers may be used
 * to address the specific device.  The path for a device can be determined
 * through the use of 'lspci -t'.
 *
 * The second format matches devices using IDs in the configuration
 * space which may match multiple devices in the system. A value of 0
 * for any field will match all devices. (Note: this differs from
 * in-kernel code that uses PCI_ANY_ID which is ~0; this is for
 * legacy reasons and convenience so users don't have to specify
 * FFFFFFFFs on the command line.)
 *
 * Returns 1 if the string matches the device, 0 if it does not and
 * a negative error code if the string cannot be parsed.
 */
static int pci_dev_str_match(struct pci_dev *dev, const char *p,
			     const char **endptr)
{
	int ret;
	int count;
	unsigned short vendor, device, subsystem_vendor, subsystem_device;

	if (strncmp(p, "pci:", 4) == 0) {
		/* PCI vendor/device (subvendor/subdevice) IDs are specified */
		p += 4;
		ret = sscanf(p, "%hx:%hx:%hx:%hx%n", &vendor, &device,
			     &subsystem_vendor, &subsystem_device, &count);
		if (ret != 4) {
			ret = sscanf(p, "%hx:%hx%n", &vendor, &device, &count);
			if (ret != 2)
				return -EINVAL;

			subsystem_vendor = 0;
			subsystem_device = 0;
		}

		p += count;

		if ((!vendor || vendor == dev->vendor) &&
		    (!device || device == dev->device) &&
		    (!subsystem_vendor ||
			    subsystem_vendor == dev->subsystem_vendor) &&
		    (!subsystem_device ||
			    subsystem_device == dev->subsystem_device))
			goto found;
	} else {
		/*
		 * PCI Bus, Device, Function IDs are specified
		 * (optionally, may include a path of devfns following it)
		 */
		ret = pci_dev_str_match_path(dev, p, &p);
		if (ret < 0)
			return ret;
		else if (ret)
			goto found;
	}

	*endptr = p;
	return 0;

found:
	*endptr = p;
	return 1;
}
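
/*
 * Illustrative strings accepted by pci_dev_str_match() (the addresses and
 * IDs below are made up):
 *
 *   0000:03:00.0          single device, domain given explicitly
 *   03:00.0               same device, domain 0 implied
 *   00:1c.0/00.0          path form, robust against bus renumbering
 *   pci:8086:9d18         any device with this vendor:device ID
 *   pci:8086:0:1028:0     device and subdevice fields wildcarded with 0
 */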

static u8 __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
				  u8 pos, int cap, int *ttl)
{
	u8 id;
	u16 ent;

	pci_bus_read_config_byte(bus, devfn, pos, &pos);

	while ((*ttl)--) {
		if (pos < 0x40)
			break;
		pos &= ~3;
		pci_bus_read_config_word(bus, devfn, pos, &ent);

		id = ent & 0xff;
		if (id == 0xff)
			break;
		if (id == cap)
			return pos;
		pos = (ent >> 8);
	}
	return 0;
}

static u8 __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn,
			      u8 pos, int cap)
{
	int ttl = PCI_FIND_CAP_TTL;

	return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl);
}

u8 pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
{
	return __pci_find_next_cap(dev->bus, dev->devfn,
				   pos + PCI_CAP_LIST_NEXT, cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_capability);

static u8 __pci_bus_find_cap_start(struct pci_bus *bus,
				    unsigned int devfn, u8 hdr_type)
{
	u16 status;

	pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
	if (!(status & PCI_STATUS_CAP_LIST))
		return 0;

	switch (hdr_type) {
	case PCI_HEADER_TYPE_NORMAL:
	case PCI_HEADER_TYPE_BRIDGE:
		return PCI_CAPABILITY_LIST;
	case PCI_HEADER_TYPE_CARDBUS:
		return PCI_CB_CAPABILITY_LIST;
	}

	return 0;
}

/**
 * pci_find_capability - query for devices' capabilities
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Tell if a device supports a given PCI capability.
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.  Possible values for @cap include:
 *
 *  %PCI_CAP_ID_PM           Power Management
 *  %PCI_CAP_ID_AGP          Accelerated Graphics Port
 *  %PCI_CAP_ID_VPD          Vital Product Data
 *  %PCI_CAP_ID_SLOTID       Slot Identification
 *  %PCI_CAP_ID_MSI          Message Signalled Interrupts
 *  %PCI_CAP_ID_CHSWP        CompactPCI HotSwap
 *  %PCI_CAP_ID_PCIX         PCI-X
 *  %PCI_CAP_ID_EXP          PCI Express
 */
u8 pci_find_capability(struct pci_dev *dev, int cap)
{
	u8 pos;

	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
	if (pos)
		pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap);

	return pos;
}
EXPORT_SYMBOL(pci_find_capability);
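
/*
 * Usage sketch (hypothetical caller): locate the Power Management
 * capability and read its PMC register.
 *
 *	u16 pmc;
 *	u8 pos = pci_find_capability(dev, PCI_CAP_ID_PM);
 *
 *	if (pos)
 *		pci_read_config_word(dev, pos + PCI_PM_PMC, &pmc);
 */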

/**
 * pci_bus_find_capability - query for devices' capabilities
 * @bus: the PCI bus to query
 * @devfn: PCI device to query
 * @cap: capability code
 *
 * Like pci_find_capability() but works for PCI devices that do not have a
 * pci_dev structure set up yet.
 *
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.
 */
u8 pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
{
	u8 hdr_type, pos;

	pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);

	pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & PCI_HEADER_TYPE_MASK);
	if (pos)
		pos = __pci_find_next_cap(bus, devfn, pos, cap);

	return pos;
}
EXPORT_SYMBOL(pci_bus_find_capability);

/**
 * pci_find_next_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @start: address at which to start looking (0 to start at beginning of list)
 * @cap: capability code
 *
 * Returns the address of the next matching extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it.  Some capabilities can occur several times, e.g., the
 * vendor-specific capability, and this provides a way to find them all.
 */
u16 pci_find_next_ext_capability(struct pci_dev *dev, u16 start, int cap)
{
	u32 header;
	int ttl;
	u16 pos = PCI_CFG_SPACE_SIZE;

	/* minimum 8 bytes per capability */
	ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;

	if (dev->cfg_size <= PCI_CFG_SPACE_SIZE)
		return 0;

	if (start)
		pos = start;

	if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
		return 0;

	/*
	 * If we have no capabilities, this is indicated by cap ID,
	 * cap version and next pointer all being 0.
	 */
	if (header == 0)
		return 0;

	while (ttl-- > 0) {
		if (PCI_EXT_CAP_ID(header) == cap && pos != start)
			return pos;

		pos = PCI_EXT_CAP_NEXT(header);
		if (pos < PCI_CFG_SPACE_SIZE)
			break;

		if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
			break;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(pci_find_next_ext_capability);
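
/*
 * Usage sketch: extended capabilities such as the Vendor-Specific one may
 * appear more than once, so walk every instance in a loop:
 *
 *	u16 pos = 0;
 *
 *	while ((pos = pci_find_next_ext_capability(dev, pos,
 *						   PCI_EXT_CAP_ID_VNDR)))
 *		; // inspect the VSEC instance at "pos"
 */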

/**
 * pci_find_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Returns the address of the requested extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it.  Possible values for @cap include:
 *
 *  %PCI_EXT_CAP_ID_ERR		Advanced Error Reporting
 *  %PCI_EXT_CAP_ID_VC		Virtual Channel
 *  %PCI_EXT_CAP_ID_DSN		Device Serial Number
 *  %PCI_EXT_CAP_ID_PWR		Power Budgeting
 */
u16 pci_find_ext_capability(struct pci_dev *dev, int cap)
{
	return pci_find_next_ext_capability(dev, 0, cap);
}
EXPORT_SYMBOL_GPL(pci_find_ext_capability);

/**
 * pci_get_dsn - Read and return the 8-byte Device Serial Number
 * @dev: PCI device to query
 *
 * Looks up the PCI_EXT_CAP_ID_DSN and reads the 8 bytes of the Device Serial
 * Number.
 *
 * Returns the DSN, or zero if the capability does not exist.
 */
u64 pci_get_dsn(struct pci_dev *dev)
{
	u32 dword;
	u64 dsn;
	int pos;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DSN);
	if (!pos)
		return 0;

	/*
	 * The Device Serial Number is two dwords offset 4 bytes from the
	 * capability position. The specification says that the first dword is
	 * the lower half, and the second dword is the upper half.
	 */
	pos += 4;
	pci_read_config_dword(dev, pos, &dword);
	dsn = (u64)dword;
	pci_read_config_dword(dev, pos + 4, &dword);
	dsn |= ((u64)dword) << 32;

	return dsn;
}
EXPORT_SYMBOL_GPL(pci_get_dsn);
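
/*
 * Usage sketch (hypothetical caller): log the serial number if the device
 * exposes one.
 *
 *	u64 dsn = pci_get_dsn(dev);
 *
 *	if (dsn)
 *		pci_info(dev, "device serial number %016llx\n", dsn);
 */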

static u8 __pci_find_next_ht_cap(struct pci_dev *dev, u8 pos, int ht_cap)
{
	int rc, ttl = PCI_FIND_CAP_TTL;
	u8 cap, mask;

	if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST)
		mask = HT_3BIT_CAP_MASK;
	else
		mask = HT_5BIT_CAP_MASK;

	pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos,
				      PCI_CAP_ID_HT, &ttl);
	while (pos) {
		rc = pci_read_config_byte(dev, pos + 3, &cap);
		if (rc != PCIBIOS_SUCCESSFUL)
			return 0;

		if ((cap & mask) == ht_cap)
			return pos;

		pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn,
					      pos + PCI_CAP_LIST_NEXT,
					      PCI_CAP_ID_HT, &ttl);
	}

	return 0;
}

/**
 * pci_find_next_ht_capability - query a device's HyperTransport capabilities
 * @dev: PCI device to query
 * @pos: Position from which to continue searching
 * @ht_cap: HyperTransport capability code
 *
 * To be used in conjunction with pci_find_ht_capability() to search for
 * all capabilities matching @ht_cap. @pos should always be a value returned
 * from pci_find_ht_capability().
 *
 * NB. To be 100% safe against broken PCI devices, the caller should take
 * steps to avoid an infinite loop.
 */
u8 pci_find_next_ht_capability(struct pci_dev *dev, u8 pos, int ht_cap)
{
	return __pci_find_next_ht_cap(dev, pos + PCI_CAP_LIST_NEXT, ht_cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_ht_capability);

/**
 * pci_find_ht_capability - query a device's HyperTransport capabilities
 * @dev: PCI device to query
 * @ht_cap: HyperTransport capability code
 *
 * Tell if a device supports a given HyperTransport capability.
 * Returns an address within the device's PCI configuration space
 * or 0 in case the device does not support the requested capability.
 * The address points to the PCI capability, of type PCI_CAP_ID_HT,
 * which has a HyperTransport capability matching @ht_cap.
 */
u8 pci_find_ht_capability(struct pci_dev *dev, int ht_cap)
{
	u8 pos;

	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
	if (pos)
		pos = __pci_find_next_ht_cap(dev, pos, ht_cap);

	return pos;
}
EXPORT_SYMBOL_GPL(pci_find_ht_capability);

/**
 * pci_find_vsec_capability - Find a vendor-specific extended capability
 * @dev: PCI device to query
 * @vendor: Vendor ID for which capability is defined
 * @cap: Vendor-specific capability ID
 *
 * If @dev has Vendor ID @vendor, search for a VSEC capability with
 * VSEC ID @cap. If found, return the capability offset in
 * config space; otherwise return 0.
 */
u16 pci_find_vsec_capability(struct pci_dev *dev, u16 vendor, int cap)
{
	u16 vsec = 0;
	u32 header;
	int ret;

	if (vendor != dev->vendor)
		return 0;

	while ((vsec = pci_find_next_ext_capability(dev, vsec,
						     PCI_EXT_CAP_ID_VNDR))) {
		ret = pci_read_config_dword(dev, vsec + PCI_VNDR_HEADER, &header);
		if (ret != PCIBIOS_SUCCESSFUL)
			continue;

		if (PCI_VNDR_HEADER_ID(header) == cap)
			return vsec;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(pci_find_vsec_capability);

/**
 * pci_find_dvsec_capability - Find DVSEC for vendor
 * @dev: PCI device to query
 * @vendor: Vendor ID to match for the DVSEC
 * @dvsec: Designated Vendor-specific capability ID
 *
 * If DVSEC has Vendor ID @vendor and DVSEC ID @dvsec return the capability
 * offset in config space; otherwise return 0.
 */
u16 pci_find_dvsec_capability(struct pci_dev *dev, u16 vendor, u16 dvsec)
{
	int pos;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DVSEC);
	if (!pos)
		return 0;

	while (pos) {
		u16 v, id;

		pci_read_config_word(dev, pos + PCI_DVSEC_HEADER1, &v);
		pci_read_config_word(dev, pos + PCI_DVSEC_HEADER2, &id);
		if (vendor == v && dvsec == id)
			return pos;

		pos = pci_find_next_ext_capability(dev, pos, PCI_EXT_CAP_ID_DVSEC);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(pci_find_dvsec_capability);
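
/*
 * Usage sketch (hypothetical caller; the vendor ID, DVSEC ID and register
 * offset are all made up):
 *
 *	u32 val;
 *	u16 pos = pci_find_dvsec_capability(pdev, 0x1234, 0x1);
 *
 *	if (pos)
 *		pci_read_config_dword(pdev, pos + 0xc, &val);
 */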

/**
 * pci_find_parent_resource - return resource region of parent bus of given
 *			      region
 * @dev: PCI device whose resources are to be searched
 * @res: child resource record for which parent is sought
 *
 * For the given resource region of the given device, return the resource
 * region of the parent bus that the given region is contained in.
 */
struct resource *pci_find_parent_resource(const struct pci_dev *dev,
					  struct resource *res)
{
	const struct pci_bus *bus = dev->bus;
	struct resource *r;

	pci_bus_for_each_resource(bus, r) {
		if (!r)
			continue;
		if (resource_contains(r, res)) {

			/*
			 * If the window is prefetchable but the BAR is
			 * not, the allocator made a mistake.
			 */
			if (r->flags & IORESOURCE_PREFETCH &&
			    !(res->flags & IORESOURCE_PREFETCH))
				return NULL;

			/*
			 * If we're below a transparent bridge, there may
			 * be both a positively-decoded aperture and a
			 * subtractively-decoded region that contain the BAR.
			 * We want the positively-decoded one, so this depends
			 * on pci_bus_for_each_resource() giving us those
			 * first.
			 */
			return r;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(pci_find_parent_resource);

/**
 * pci_find_resource - Return matching PCI device resource
 * @dev: PCI device to query
 * @res: Resource to look for
 *
 * Goes over standard PCI resources (BARs) and checks if the given resource
 * is partially or fully contained in any of them. In that case the
 * matching resource is returned, %NULL otherwise.
 */
struct resource *pci_find_resource(struct pci_dev *dev, struct resource *res)
{
	int i;

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		struct resource *r = &dev->resource[i];

		if (r->start && resource_contains(r, res))
			return r;
	}

	return NULL;
}
EXPORT_SYMBOL(pci_find_resource);

/**
 * pci_wait_for_pending - wait for @mask bit(s) to clear in status word @pos
 * @dev: the PCI device to operate on
 * @pos: config space offset of status word
 * @mask: mask of bit(s) to care about in status word
 *
 * Return 1 when the mask bit(s) in the status word are clear, 0 otherwise.
 */
int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask)
{
	int i;

	/* Wait for Transaction Pending bit to clear */
	for (i = 0; i < 4; i++) {
		u16 status;
		if (i)
			msleep((1 << (i - 1)) * 100);

		pci_read_config_word(dev, pos, &status);
		if (!(status & mask))
			return 1;
	}

	return 0;
}
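
/*
 * Usage sketch: a caller draining a PCIe function before reset can poll
 * the Transactions Pending bit in the Device Status register, assuming
 * pci_is_pcie(dev):
 *
 *	pci_wait_for_pending(dev, pci_pcie_cap(dev) + PCI_EXP_DEVSTA,
 *			     PCI_EXP_DEVSTA_TRPND);
 */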

static int pci_acs_enable;

/**
 * pci_request_acs - ask for ACS to be enabled if supported
 */
void pci_request_acs(void)
{
	pci_acs_enable = 1;
}

static const char *disable_acs_redir_param;

/**
 * pci_disable_acs_redir - disable ACS redirect capabilities
 * @dev: the PCI device
 *
 * Only for devices specified in the disable_acs_redir kernel parameter.
 */
static void pci_disable_acs_redir(struct pci_dev *dev)
{
	int ret = 0;
	const char *p;
	int pos;
	u16 ctrl;

	if (!disable_acs_redir_param)
		return;

	p = disable_acs_redir_param;
	while (*p) {
		ret = pci_dev_str_match(dev, p, &p);
		if (ret < 0) {
			pr_info_once("PCI: Can't parse disable_acs_redir parameter: %s\n",
				     disable_acs_redir_param);

			break;
		} else if (ret == 1) {
			/* Found a match */
			break;
		}

		if (*p != ';' && *p != ',') {
			/* End of param or invalid format */
			break;
		}
		p++;
	}

	if (ret != 1)
		return;

	if (!pci_dev_specific_disable_acs_redir(dev))
		return;

	pos = dev->acs_cap;
	if (!pos) {
		pci_warn(dev, "cannot disable ACS redirect for this hardware as it does not have ACS capabilities\n");
		return;
	}

	pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);

	/* P2P Request & Completion Redirect */
	ctrl &= ~(PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC);

	pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);

	pci_info(dev, "disabled ACS redirect\n");
}

/**
 * pci_std_enable_acs - enable ACS on devices using standard ACS capabilities
 * @dev: the PCI device
 */
static void pci_std_enable_acs(struct pci_dev *dev)
{
	int pos;
	u16 cap;
	u16 ctrl;

	pos = dev->acs_cap;
	if (!pos)
		return;

	pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap);
	pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);

	/* Source Validation */
	ctrl |= (cap & PCI_ACS_SV);

	/* P2P Request Redirect */
	ctrl |= (cap & PCI_ACS_RR);

	/* P2P Completion Redirect */
	ctrl |= (cap & PCI_ACS_CR);

	/* Upstream Forwarding */
	ctrl |= (cap & PCI_ACS_UF);

	/* Enable Translation Blocking for external devices and noats */
	if (pci_ats_disabled() || dev->external_facing || dev->untrusted)
		ctrl |= (cap & PCI_ACS_TB);

	pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
}

/**
 * pci_enable_acs - enable ACS if the hardware supports it
 * @dev: the PCI device
 */
static void pci_enable_acs(struct pci_dev *dev)
{
	if (!pci_acs_enable)
		goto disable_acs_redir;

	if (!pci_dev_specific_enable_acs(dev))
		goto disable_acs_redir;

	pci_std_enable_acs(dev);

disable_acs_redir:
	/*
	 * Note: pci_disable_acs_redir() must be called even if ACS was not
	 * enabled by the kernel because it may have been enabled by
	 * platform firmware.  So if we are told to disable it, we should
	 * always disable it after setting the kernel's default
	 * preferences.
	 */
	pci_disable_acs_redir(dev);
}

/**
 * pci_restore_bars - restore a device's BAR values (e.g. after wake-up)
 * @dev: PCI device to have its BARs restored
 *
 * Restore the BAR values for a given device, so as to make it
 * accessible by its driver.
 */
static void pci_restore_bars(struct pci_dev *dev)
{
	int i;

	for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
		pci_update_resource(dev, i);
}

static inline bool platform_pci_power_manageable(struct pci_dev *dev)
{
	if (pci_use_mid_pm())
		return true;

	return acpi_pci_power_manageable(dev);
}

static inline int platform_pci_set_power_state(struct pci_dev *dev,
					       pci_power_t t)
{
	if (pci_use_mid_pm())
		return mid_pci_set_power_state(dev, t);

	return acpi_pci_set_power_state(dev, t);
}

static inline pci_power_t platform_pci_get_power_state(struct pci_dev *dev)
{
	if (pci_use_mid_pm())
		return mid_pci_get_power_state(dev);

	return acpi_pci_get_power_state(dev);
}

static inline void platform_pci_refresh_power_state(struct pci_dev *dev)
{
	if (!pci_use_mid_pm())
		acpi_pci_refresh_power_state(dev);
}

static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev)
{
	if (pci_use_mid_pm())
		return PCI_POWER_ERROR;

	return acpi_pci_choose_state(dev);
}

static inline int platform_pci_set_wakeup(struct pci_dev *dev, bool enable)
{
	if (pci_use_mid_pm())
		return PCI_POWER_ERROR;

	return acpi_pci_wakeup(dev, enable);
}

static inline bool platform_pci_need_resume(struct pci_dev *dev)
{
	if (pci_use_mid_pm())
		return false;

	return acpi_pci_need_resume(dev);
}

static inline bool platform_pci_bridge_d3(struct pci_dev *dev)
{
	if (pci_use_mid_pm())
		return false;

	return acpi_pci_bridge_d3(dev);
}

/**
 * pci_update_current_state - Read power state of given device and cache it
 * @dev: PCI device to handle.
 * @state: State to cache in case the device doesn't have the PM capability
 *
 * The power state is read from the PMCSR register, which however is
 * inaccessible in D3cold.  The platform firmware is therefore queried first
 * to detect accessibility of the register.  In case the platform firmware
 * reports an incorrect state or the device isn't power manageable by the
 * platform at all, we try to detect D3cold by testing accessibility of the
 * vendor ID in config space.
 */
void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
{
	if (platform_pci_get_power_state(dev) == PCI_D3cold) {
		dev->current_state = PCI_D3cold;
	} else if (dev->pm_cap) {
		u16 pmcsr;

		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
		if (PCI_POSSIBLE_ERROR(pmcsr)) {
			dev->current_state = PCI_D3cold;
			return;
		}
		dev->current_state = pmcsr & PCI_PM_CTRL_STATE_MASK;
	} else {
		dev->current_state = state;
	}
}

/**
 * pci_refresh_power_state - Refresh the given device's power state data
 * @dev: Target PCI device.
 *
 * Ask the platform to refresh the device's power state information and invoke
 * pci_update_current_state() to update its current PCI power state.
 */
void pci_refresh_power_state(struct pci_dev *dev)
{
	platform_pci_refresh_power_state(dev);
	pci_update_current_state(dev, dev->current_state);
}

/**
 * pci_platform_power_transition - Use platform to change device power state
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 */
int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
{
	int error;

	error = platform_pci_set_power_state(dev, state);
	if (!error)
		pci_update_current_state(dev, state);
	else if (!dev->pm_cap) /* Fall back to PCI_D0 */
		dev->current_state = PCI_D0;

	return error;
}
EXPORT_SYMBOL_GPL(pci_platform_power_transition);

static int pci_resume_one(struct pci_dev *pci_dev, void *ign)
{
	pm_request_resume(&pci_dev->dev);
	return 0;
}

/**
 * pci_resume_bus - Walk given bus and runtime resume devices on it
 * @bus: Top bus of the subtree to walk.
 */
void pci_resume_bus(struct pci_bus *bus)
{
	if (bus)
		pci_walk_bus(bus, pci_resume_one, NULL);
}

static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout)
{
	int delay = 1;
	bool retrain = false;
	struct pci_dev *bridge;

	if (pci_is_pcie(dev)) {
		bridge = pci_upstream_bridge(dev);
		if (bridge)
			retrain = true;
	}

	/*
	 * After reset, the device should not silently discard config
	 * requests, but it may still indicate that it needs more time by
	 * responding to them with CRS completions.  The Root Port will
	 * generally synthesize ~0 (PCI_ERROR_RESPONSE) data to complete
	 * the read (except when CRS SV is enabled and the read was for the
	 * Vendor ID; in that case it synthesizes 0x0001 data).
	 *
	 * Wait for the device to return a non-CRS completion.  Read the
	 * Command register instead of Vendor ID so we don't have to
	 * contend with the CRS SV value.
	 */
	for (;;) {
		u32 id;

		pci_read_config_dword(dev, PCI_COMMAND, &id);
		if (!PCI_POSSIBLE_ERROR(id))
			break;

		if (delay > timeout) {
			pci_warn(dev, "not ready %dms after %s; giving up\n",
				 delay - 1, reset_type);
			return -ENOTTY;
		}

		if (delay > PCI_RESET_WAIT) {
			if (retrain) {
				retrain = false;
				if (pcie_failed_link_retrain(bridge)) {
					delay = 1;
					continue;
				}
			}
			pci_info(dev, "not ready %dms after %s; waiting\n",
				 delay - 1, reset_type);
		}

		msleep(delay);
		delay *= 2;
	}

	if (delay > PCI_RESET_WAIT)
		pci_info(dev, "ready %dms after %s\n", delay - 1,
			 reset_type);

	return 0;
}

/**
 * pci_power_up - Put the given device into D0
 * @dev: PCI device to power up
 *
 * On success, return 0 or 1, depending on whether or not it is necessary to
 * restore the device's BARs subsequently (1 is returned in that case).
 *
 * On failure, return a negative error code.  Always return failure if @dev
 * lacks a Power Management Capability, even if the platform was able to
 * put the device in D0 via non-PCI means.
 */
int pci_power_up(struct pci_dev *dev)
{
	bool need_restore;
	pci_power_t state;
	u16 pmcsr;

	platform_pci_set_power_state(dev, PCI_D0);

	if (!dev->pm_cap) {
		state = platform_pci_get_power_state(dev);
		if (state == PCI_UNKNOWN)
			dev->current_state = PCI_D0;
		else
			dev->current_state = state;

		return -EIO;
	}

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	if (PCI_POSSIBLE_ERROR(pmcsr)) {
		pci_err(dev, "Unable to change power state from %s to D0, device inaccessible\n",
			pci_power_name(dev->current_state));
		dev->current_state = PCI_D3cold;
		return -EIO;
	}

	state = pmcsr & PCI_PM_CTRL_STATE_MASK;

	need_restore = (state == PCI_D3hot || dev->current_state >= PCI_D3hot) &&
			!(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET);

	if (state == PCI_D0)
		goto end;

	/*
	 * Force the entire word to 0. This doesn't affect PME_Status, disables
	 * PME_En, and sets PowerState to 0.
	 */
	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, 0);

	/* Mandatory transition delays; see PCI PM 1.2. */
	if (state == PCI_D3hot)
		pci_dev_d3_sleep(dev);
	else if (state == PCI_D2)
		udelay(PCI_PM_D2_DELAY);

end:
	dev->current_state = PCI_D0;
	if (need_restore)
		return 1;

	return 0;
}

/**
 * pci_set_full_power_state - Put a PCI device into D0 and update its state
 * @dev: PCI device to power up
 *
 * Call pci_power_up() to put @dev into D0, read from its PCI_PM_CTRL register
 * to confirm the state change, restore its BARs if they might be lost and
 * reconfigure ASPM in accordance with the new power state.
 *
 * If pci_restore_state() is going to be called right after a power state change
 * to D0, it is more efficient to use pci_power_up() directly instead of this
 * function.
 */
static int pci_set_full_power_state(struct pci_dev *dev)
{
	u16 pmcsr;
	int ret;

	ret = pci_power_up(dev);
	if (ret < 0) {
		if (dev->current_state == PCI_D0)
			return 0;

		return ret;
	}

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	dev->current_state = pmcsr & PCI_PM_CTRL_STATE_MASK;
	if (dev->current_state != PCI_D0) {
		pci_info_ratelimited(dev, "Refused to change power state from %s to D0\n",
				     pci_power_name(dev->current_state));
	} else if (ret > 0) {
		/*
		 * According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
		 * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
		 * from D3hot to D0 _may_ perform an internal reset, thereby
		 * going to "D0 Uninitialized" rather than "D0 Initialized".
		 * For example, at least some versions of the 3c905B and the
		 * 3c556B exhibit this behaviour.
		 *
		 * At least some laptop BIOSen (e.g. the Thinkpad T21) leave
		 * devices in a D3hot state at boot.  Consequently, we need to
		 * restore at least the BARs so that the device will be
		 * accessible to its driver.
		 */
		pci_restore_bars(dev);
	}

	if (dev->bus->self)
		pcie_aspm_pm_state_change(dev->bus->self);

	return 0;
}

/**
 * __pci_dev_set_current_state - Set current state of a PCI device
 * @dev: Device to handle
 * @data: pointer to state to be set
 */
static int __pci_dev_set_current_state(struct pci_dev *dev, void *data)
{
	pci_power_t state = *(pci_power_t *)data;

	dev->current_state = state;
	return 0;
}

/**
 * pci_bus_set_current_state - Walk given bus and set current state of devices
 * @bus: Top bus of the subtree to walk.
 * @state: state to be set
 */
void pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state)
{
	if (bus)
		pci_walk_bus(bus, __pci_dev_set_current_state, &state);
}

/**
 * pci_set_low_power_state - Put a PCI device into a low-power state.
 * @dev: PCI device to handle.
 * @state: PCI power state (D1, D2, D3hot) to put the device into.
 *
 * Use the device's PCI_PM_CTRL register to put it into a low-power state.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if device already is in the requested state.
 * 0 if device's power state has been successfully changed.
 */
static int pci_set_low_power_state(struct pci_dev *dev, pci_power_t state)
{
	u16 pmcsr;

	if (!dev->pm_cap)
		return -EIO;

	/*
	 * Validate transition: We can enter D0 from any state, but if
	 * we're already in a low-power state, we can only go deeper.  E.g.,
	 * we can go from D1 to D3, but we can't go directly from D3 to D1;
	 * we'd have to go from D3 to D0, then to D1.
	 */
	if (dev->current_state <= PCI_D3cold && dev->current_state > state) {
		pci_dbg(dev, "Invalid power transition (from %s to %s)\n",
			pci_power_name(dev->current_state),
			pci_power_name(state));
		return -EINVAL;
	}

	/* Check if this device supports the desired state */
	if ((state == PCI_D1 && !dev->d1_support)
	   || (state == PCI_D2 && !dev->d2_support))
		return -EIO;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	if (PCI_POSSIBLE_ERROR(pmcsr)) {
		pci_err(dev, "Unable to change power state from %s to %s, device inaccessible\n",
			pci_power_name(dev->current_state),
			pci_power_name(state));
		dev->current_state = PCI_D3cold;
		return -EIO;
	}

	pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
	pmcsr |= state;

	/* Enter specified state */
	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);

	/* Mandatory power management transition delays; see PCI PM 1.2. */
	if (state == PCI_D3hot)
		pci_dev_d3_sleep(dev);
	else if (state == PCI_D2)
		udelay(PCI_PM_D2_DELAY);

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	dev->current_state = pmcsr & PCI_PM_CTRL_STATE_MASK;
	if (dev->current_state != state)
		pci_info_ratelimited(dev, "Refused to change power state from %s to %s\n",
				     pci_power_name(dev->current_state),
				     pci_power_name(state));

	if (dev->bus->self)
		pcie_aspm_pm_state_change(dev->bus->self);

	return 0;
}

/**
 * pci_set_power_state - Set the power state of a PCI device
 * @dev: PCI device to handle.
 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 *
 * Transition a device to a new power state, using the platform firmware and/or
 * the device's PCI PM registers.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if the transition is to D1 or D2 but D1 and D2 are not supported.
 * 0 if device already is in the requested state.
 * 0 if the transition is to D3 but D3 is not supported.
 * 0 if device's power state has been successfully changed.
 */
int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	int error;

	/* Bound the state we're entering */
	if (state > PCI_D3cold)
		state = PCI_D3cold;
	else if (state < PCI_D0)
		state = PCI_D0;
	else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))

		/*
		 * If the device or the parent bridge do not support PCI
		 * PM, ignore the request if we're doing anything other
		 * than putting it into D0 (which would only happen on
		 * boot).
		 */
		return 0;

	/* Check if we're already there */
	if (dev->current_state == state)
		return 0;

	if (state == PCI_D0)
		return pci_set_full_power_state(dev);

	/*
	 * This device is quirked not to be put into D3, so don't put it in
	 * D3
	 */
	if (state >= PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
		return 0;

	if (state == PCI_D3cold) {
		/*
		 * To put the device in D3cold, put it into D3hot in the native
		 * way, then put it into D3cold using platform ops.
		 */
		error = pci_set_low_power_state(dev, PCI_D3hot);

		if (pci_platform_power_transition(dev, PCI_D3cold))
			return error;

		/* Powering off a bridge may power off the whole hierarchy */
		if (dev->current_state == PCI_D3cold)
			pci_bus_set_current_state(dev->subordinate, PCI_D3cold);
	} else {
		error = pci_set_low_power_state(dev, state);

		if (pci_platform_power_transition(dev, state))
			return error;
	}

	return 0;
}
EXPORT_SYMBOL(pci_set_power_state);
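
/*
 * Usage sketch (hypothetical driver suspend path): out-of-range states
 * are clamped by the helper, so callers commonly just request D3hot and
 * later return to D0:
 *
 *	pci_set_power_state(pdev, PCI_D3hot);	// suspend
 *	...
 *	pci_set_power_state(pdev, PCI_D0);	// resume
 */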

#define PCI_EXP_SAVE_REGS	7

static struct pci_cap_saved_state *_pci_find_saved_cap(struct pci_dev *pci_dev,
						       u16 cap, bool extended)
{
	struct pci_cap_saved_state *tmp;

	hlist_for_each_entry(tmp, &pci_dev->saved_cap_space, next) {
		if (tmp->cap.cap_extended == extended && tmp->cap.cap_nr == cap)
			return tmp;
	}
	return NULL;
}

struct pci_cap_saved_state *pci_find_saved_cap(struct pci_dev *dev, char cap)
{
	return _pci_find_saved_cap(dev, cap, false);
}

struct pci_cap_saved_state *pci_find_saved_ext_cap(struct pci_dev *dev, u16 cap)
{
	return _pci_find_saved_cap(dev, cap, true);
}

static int pci_save_pcie_state(struct pci_dev *dev)
{
	int i = 0;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	if (!pci_is_pcie(dev))
		return 0;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	if (!save_state) {
		pci_err(dev, "buffer not found in %s\n", __func__);
		return -ENOMEM;
	}

	cap = (u16 *)&save_state->cap.data[0];
	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_LNKCTL, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_SLTCTL, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_RTCTL,  &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_SLTCTL2, &cap[i++]);

	return 0;
}

void pci_bridge_reconfigure_ltr(struct pci_dev *dev)
{
#ifdef CONFIG_PCIEASPM
	struct pci_dev *bridge;
	u32 ctl;

	bridge = pci_upstream_bridge(dev);
	if (bridge && bridge->ltr_path) {
		pcie_capability_read_dword(bridge, PCI_EXP_DEVCTL2, &ctl);
		if (!(ctl & PCI_EXP_DEVCTL2_LTR_EN)) {
			pci_dbg(bridge, "re-enabling LTR\n");
			pcie_capability_set_word(bridge, PCI_EXP_DEVCTL2,
						 PCI_EXP_DEVCTL2_LTR_EN);
		}
	}
#endif
}

static void pci_restore_pcie_state(struct pci_dev *dev)
{
	int i = 0;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	if (!save_state)
		return;

	/*
	 * Downstream ports reset the LTR enable bit when link goes down.
	 * Check and re-configure the bit here before restoring device.
	 * PCIe r5.0, sec 7.5.3.16.
	 */
	pci_bridge_reconfigure_ltr(dev);

	cap = (u16 *)&save_state->cap.data[0];
	pcie_capability_write_word(dev, PCI_EXP_DEVCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_LNKCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_SLTCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_RTCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_DEVCTL2, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_SLTCTL2, cap[i++]);
}

static int pci_save_pcix_state(struct pci_dev *dev)
{
	int pos;
	struct pci_cap_saved_state *save_state;

	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!pos)
		return 0;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
	if (!save_state) {
		pci_err(dev, "buffer not found in %s\n", __func__);
		return -ENOMEM;
	}

	pci_read_config_word(dev, pos + PCI_X_CMD,
			     (u16 *)save_state->cap.data);

	return 0;
}

static void pci_restore_pcix_state(struct pci_dev *dev)
{
	int i = 0, pos;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!save_state || !pos)
		return;
	cap = (u16 *)&save_state->cap.data[0];

	pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
}

static void pci_save_ltr_state(struct pci_dev *dev)
{
	int ltr;
	struct pci_cap_saved_state *save_state;
	u32 *cap;

	if (!pci_is_pcie(dev))
		return;

	ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
	if (!ltr)
		return;

	save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR);
	if (!save_state) {
		pci_err(dev, "no suspend buffer for LTR; ASPM issues possible after resume\n");
		return;
	}

	/* Some broken devices only support dword access to LTR */
	cap = &save_state->cap.data[0];
	pci_read_config_dword(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, cap);
}

static void pci_restore_ltr_state(struct pci_dev *dev)
{
	struct pci_cap_saved_state *save_state;
	int ltr;
	u32 *cap;

	save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR);
	ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
	if (!save_state || !ltr)
		return;

	/* Some broken devices only support dword access to LTR */
	cap = &save_state->cap.data[0];
	pci_write_config_dword(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, *cap);
}

/**
 * pci_save_state - save the PCI configuration space of a device before
 *		    suspending
 * @dev: PCI device that we're dealing with
 */
int pci_save_state(struct pci_dev *dev)
{
	int i;
	/* XXX: 100% dword access ok here? */
	for (i = 0; i < 16; i++) {
		pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
		pci_dbg(dev, "save config %#04x: %#010x\n",
			i * 4, dev->saved_config_space[i]);
	}
	dev->state_saved = true;

	i = pci_save_pcie_state(dev);
	if (i != 0)
		return i;

	i = pci_save_pcix_state(dev);
	if (i != 0)
		return i;

	pci_save_ltr_state(dev);
	pci_save_dpc_state(dev);
	pci_save_aer_state(dev);
	pci_save_ptm_state(dev);
	return pci_save_vc_state(dev);
}
EXPORT_SYMBOL(pci_save_state);
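
/*
 * Usage sketch: pci_save_state() pairs with pci_restore_state() further
 * below; the restore is a no-op unless state was saved first, so a driver
 * resume path may call it unconditionally:
 *
 *	pci_save_state(pdev);		// suspend
 *	...
 *	pci_restore_state(pdev);	// resume; no-op if nothing saved
 */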
1718 
1719 static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
1720 				     u32 saved_val, int retry, bool force)
1721 {
1722 	u32 val;
1723 
1724 	pci_read_config_dword(pdev, offset, &val);
1725 	if (!force && val == saved_val)
1726 		return;
1727 
1728 	for (;;) {
1729 		pci_dbg(pdev, "restore config %#04x: %#010x -> %#010x\n",
1730 			offset, val, saved_val);
1731 		pci_write_config_dword(pdev, offset, saved_val);
1732 		if (retry-- <= 0)
1733 			return;
1734 
1735 		pci_read_config_dword(pdev, offset, &val);
1736 		if (val == saved_val)
1737 			return;
1738 
1739 		mdelay(1);
1740 	}
1741 }
1742 
1743 static void pci_restore_config_space_range(struct pci_dev *pdev,
1744 					   int start, int end, int retry,
1745 					   bool force)
1746 {
1747 	int index;
1748 
1749 	for (index = end; index >= start; index--)
1750 		pci_restore_config_dword(pdev, 4 * index,
1751 					 pdev->saved_config_space[index],
1752 					 retry, force);
1753 }
1754 
1755 static void pci_restore_config_space(struct pci_dev *pdev)
1756 {
1757 	if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
1758 		pci_restore_config_space_range(pdev, 10, 15, 0, false);
1759 		/* Restore BARs before the command register. */
1760 		pci_restore_config_space_range(pdev, 4, 9, 10, false);
1761 		pci_restore_config_space_range(pdev, 0, 3, 0, false);
1762 	} else if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
1763 		pci_restore_config_space_range(pdev, 12, 15, 0, false);
1764 
1765 		/*
1766 		 * Force rewriting of prefetch registers to avoid S3 resume
1767 		 * issues on Intel PCI bridges that occur when these
1768 		 * registers are not explicitly written.
1769 		 */
1770 		pci_restore_config_space_range(pdev, 9, 11, 0, true);
1771 		pci_restore_config_space_range(pdev, 0, 8, 0, false);
1772 	} else {
1773 		pci_restore_config_space_range(pdev, 0, 15, 0, false);
1774 	}
1775 }
1776 
1777 static void pci_restore_rebar_state(struct pci_dev *pdev)
1778 {
1779 	unsigned int pos, nbars, i;
1780 	u32 ctrl;
1781 
1782 	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_REBAR);
1783 	if (!pos)
1784 		return;
1785 
1786 	pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
1787 	nbars = FIELD_GET(PCI_REBAR_CTRL_NBAR_MASK, ctrl);
1788 
1789 	for (i = 0; i < nbars; i++, pos += 8) {
1790 		struct resource *res;
1791 		int bar_idx, size;
1792 
1793 		pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
1794 		bar_idx = ctrl & PCI_REBAR_CTRL_BAR_IDX;
1795 		res = pdev->resource + bar_idx;
1796 		size = pci_rebar_bytes_to_size(resource_size(res));
1797 		ctrl &= ~PCI_REBAR_CTRL_BAR_SIZE;
1798 		ctrl |= FIELD_PREP(PCI_REBAR_CTRL_BAR_SIZE, size);
1799 		pci_write_config_dword(pdev, pos + PCI_REBAR_CTRL, ctrl);
1800 	}
1801 }
1802 
1803 /**
1804  * pci_restore_state - Restore the saved state of a PCI device
1805  * @dev: PCI device that we're dealing with
1806  */
1807 void pci_restore_state(struct pci_dev *dev)
1808 {
1809 	if (!dev->state_saved)
1810 		return;
1811 
1812 	/*
1813 	 * Restore max latencies (in the LTR capability) before enabling
1814 	 * LTR itself (in the PCIe capability).
1815 	 */
1816 	pci_restore_ltr_state(dev);
1817 
1818 	pci_restore_pcie_state(dev);
1819 	pci_restore_pasid_state(dev);
1820 	pci_restore_pri_state(dev);
1821 	pci_restore_ats_state(dev);
1822 	pci_restore_vc_state(dev);
1823 	pci_restore_rebar_state(dev);
1824 	pci_restore_dpc_state(dev);
1825 	pci_restore_ptm_state(dev);
1826 
1827 	pci_aer_clear_status(dev);
1828 	pci_restore_aer_state(dev);
1829 
1830 	pci_restore_config_space(dev);
1831 
1832 	pci_restore_pcix_state(dev);
1833 	pci_restore_msi_state(dev);
1834 
1835 	/* Restore ACS and IOV configuration state */
1836 	pci_enable_acs(dev);
1837 	pci_restore_iov_state(dev);
1838 
1839 	dev->state_saved = false;
1840 }
1841 EXPORT_SYMBOL(pci_restore_state);
1842 
1843 struct pci_saved_state {
1844 	u32 config_space[16];
1845 	struct pci_cap_saved_data cap[];
1846 };
1847 
1848 /**
1849  * pci_store_saved_state - Allocate and return an opaque struct containing
1850  *			   the device's saved state.
1851  * @dev: PCI device that we're dealing with
1852  *
1853  * Return NULL if no state has been saved or if allocation fails.
1854  */
1855 struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev)
1856 {
1857 	struct pci_saved_state *state;
1858 	struct pci_cap_saved_state *tmp;
1859 	struct pci_cap_saved_data *cap;
1860 	size_t size;
1861 
1862 	if (!dev->state_saved)
1863 		return NULL;
1864 
1865 	size = sizeof(*state) + sizeof(struct pci_cap_saved_data);
1866 
1867 	hlist_for_each_entry(tmp, &dev->saved_cap_space, next)
1868 		size += sizeof(struct pci_cap_saved_data) + tmp->cap.size;
1869 
1870 	state = kzalloc(size, GFP_KERNEL);
1871 	if (!state)
1872 		return NULL;
1873 
1874 	memcpy(state->config_space, dev->saved_config_space,
1875 	       sizeof(state->config_space));
1876 
1877 	cap = state->cap;
1878 	hlist_for_each_entry(tmp, &dev->saved_cap_space, next) {
1879 		size_t len = sizeof(struct pci_cap_saved_data) + tmp->cap.size;
1880 		memcpy(cap, &tmp->cap, len);
1881 		cap = (struct pci_cap_saved_data *)((u8 *)cap + len);
1882 	}
1883 	/* Empty cap_save terminates list */
1884 
1885 	return state;
1886 }
1887 EXPORT_SYMBOL_GPL(pci_store_saved_state);
1888 
1889 /**
1890  * pci_load_saved_state - Reload the provided saved state into struct pci_dev.
1891  * @dev: PCI device that we're dealing with
1892  * @state: Saved state returned from pci_store_saved_state()
1893  */
1894 int pci_load_saved_state(struct pci_dev *dev,
1895 			 struct pci_saved_state *state)
1896 {
1897 	struct pci_cap_saved_data *cap;
1898 
1899 	dev->state_saved = false;
1900 
1901 	if (!state)
1902 		return 0;
1903 
1904 	memcpy(dev->saved_config_space, state->config_space,
1905 	       sizeof(state->config_space));
1906 
1907 	cap = state->cap;
1908 	while (cap->size) {
1909 		struct pci_cap_saved_state *tmp;
1910 
1911 		tmp = _pci_find_saved_cap(dev, cap->cap_nr, cap->cap_extended);
1912 		if (!tmp || tmp->cap.size != cap->size)
1913 			return -EINVAL;
1914 
1915 		memcpy(tmp->cap.data, cap->data, tmp->cap.size);
1916 		cap = (struct pci_cap_saved_data *)((u8 *)cap +
1917 		       sizeof(struct pci_cap_saved_data) + cap->size);
1918 	}
1919 
1920 	dev->state_saved = true;
1921 	return 0;
1922 }
1923 EXPORT_SYMBOL_GPL(pci_load_saved_state);
1924 
1925 /**
1926  * pci_load_and_free_saved_state - Reload the saved state pointed to by @state,
1927  *				   and free the memory allocated for it.
1928  * @dev: PCI device that we're dealing with
1929  * @state: Pointer to saved state returned from pci_store_saved_state()
1930  */
1931 int pci_load_and_free_saved_state(struct pci_dev *dev,
1932 				  struct pci_saved_state **state)
1933 {
1934 	int ret = pci_load_saved_state(dev, *state);
1935 	kfree(*state);
1936 	*state = NULL;
1937 	return ret;
1938 }
1939 EXPORT_SYMBOL_GPL(pci_load_and_free_saved_state);
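
/*
 * Illustrative sketch, not part of this file: a caller can detach a copy of
 * the saved state and later push it back and restore it to the hardware.
 * Error handling is abbreviated and "pdev" is a hypothetical, already
 * enabled device.
 *
 *	struct pci_saved_state *saved;
 *
 *	pci_save_state(pdev);			// snapshot config space
 *	saved = pci_store_saved_state(pdev);	// detach an opaque copy
 *
 *	// ... the device is reset or otherwise loses its configuration ...
 *
 *	if (saved && !pci_load_and_free_saved_state(pdev, &saved))
 *		pci_restore_state(pdev);	// write the snapshot back
 */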
1940 
1941 int __weak pcibios_enable_device(struct pci_dev *dev, int bars)
1942 {
1943 	return pci_enable_resources(dev, bars);
1944 }
1945 
1946 static int do_pci_enable_device(struct pci_dev *dev, int bars)
1947 {
1948 	int err;
1949 	struct pci_dev *bridge;
1950 	u16 cmd;
1951 	u8 pin;
1952 
1953 	err = pci_set_power_state(dev, PCI_D0);
1954 	if (err < 0 && err != -EIO)
1955 		return err;
1956 
1957 	bridge = pci_upstream_bridge(dev);
1958 	if (bridge)
1959 		pcie_aspm_powersave_config_link(bridge);
1960 
1961 	err = pcibios_enable_device(dev, bars);
1962 	if (err < 0)
1963 		return err;
1964 	pci_fixup_device(pci_fixup_enable, dev);
1965 
1966 	if (dev->msi_enabled || dev->msix_enabled)
1967 		return 0;
1968 
1969 	pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
1970 	if (pin) {
1971 		pci_read_config_word(dev, PCI_COMMAND, &cmd);
1972 		if (cmd & PCI_COMMAND_INTX_DISABLE)
1973 			pci_write_config_word(dev, PCI_COMMAND,
1974 					      cmd & ~PCI_COMMAND_INTX_DISABLE);
1975 	}
1976 
1977 	return 0;
1978 }
1979 
1980 /**
1981  * pci_reenable_device - Resume abandoned device
1982  * @dev: PCI device to be resumed
1983  *
1984  * NOTE: This function is a backend of pci_default_resume() and is not supposed
1985  * to be called by normal code, write proper resume handler and use it instead.
1986  */
1987 int pci_reenable_device(struct pci_dev *dev)
1988 {
1989 	if (pci_is_enabled(dev))
1990 		return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1);
1991 	return 0;
1992 }
1993 EXPORT_SYMBOL(pci_reenable_device);
1994 
1995 static void pci_enable_bridge(struct pci_dev *dev)
1996 {
1997 	struct pci_dev *bridge;
1998 	int retval;
1999 
2000 	bridge = pci_upstream_bridge(dev);
2001 	if (bridge)
2002 		pci_enable_bridge(bridge);
2003 
2004 	if (pci_is_enabled(dev)) {
2005 		if (!dev->is_busmaster)
2006 			pci_set_master(dev);
2007 		return;
2008 	}
2009 
2010 	retval = pci_enable_device(dev);
2011 	if (retval)
2012 		pci_err(dev, "Error enabling bridge (%d), continuing\n",
2013 			retval);
2014 	pci_set_master(dev);
2015 }
2016 
2017 static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags)
2018 {
2019 	struct pci_dev *bridge;
2020 	int err;
2021 	int i, bars = 0;
2022 
2023 	/*
2024 	 * Power state could be unknown at this point, either due to a fresh
2025 	 * boot or a device removal call.  So get the current power state
2026 	 * so that things like MSI message writing will behave as expected
2027 	 * (e.g. if the device really is in D0 at enable time).
2028 	 */
2029 	pci_update_current_state(dev, dev->current_state);
2030 
2031 	if (atomic_inc_return(&dev->enable_cnt) > 1)
2032 		return 0;		/* already enabled */
2033 
2034 	bridge = pci_upstream_bridge(dev);
2035 	if (bridge)
2036 		pci_enable_bridge(bridge);
2037 
2038 	/* Walk all resources, skipping only the SR-IOV ones */
2039 	for (i = 0; i <= PCI_ROM_RESOURCE; i++)
2040 		if (dev->resource[i].flags & flags)
2041 			bars |= (1 << i);
2042 	for (i = PCI_BRIDGE_RESOURCES; i < DEVICE_COUNT_RESOURCE; i++)
2043 		if (dev->resource[i].flags & flags)
2044 			bars |= (1 << i);
2045 
2046 	err = do_pci_enable_device(dev, bars);
2047 	if (err < 0)
2048 		atomic_dec(&dev->enable_cnt);
2049 	return err;
2050 }
2051 
2052 /**
2053  * pci_enable_device_io - Initialize a device for use with IO space
2054  * @dev: PCI device to be initialized
2055  *
2056  * Initialize device before it's used by a driver. Ask low-level code
2057  * to enable I/O resources. Wake up the device if it was suspended.
2058  * Beware, this function can fail.
2059  */
2060 int pci_enable_device_io(struct pci_dev *dev)
2061 {
2062 	return pci_enable_device_flags(dev, IORESOURCE_IO);
2063 }
2064 EXPORT_SYMBOL(pci_enable_device_io);
2065 
2066 /**
2067  * pci_enable_device_mem - Initialize a device for use with Memory space
2068  * @dev: PCI device to be initialized
2069  *
2070  * Initialize device before it's used by a driver. Ask low-level code
2071  * to enable Memory resources. Wake up the device if it was suspended.
2072  * Beware, this function can fail.
2073  */
2074 int pci_enable_device_mem(struct pci_dev *dev)
2075 {
2076 	return pci_enable_device_flags(dev, IORESOURCE_MEM);
2077 }
2078 EXPORT_SYMBOL(pci_enable_device_mem);
2079 
2080 /**
2081  * pci_enable_device - Initialize device before it's used by a driver.
2082  * @dev: PCI device to be initialized
2083  *
2084  * Initialize device before it's used by a driver. Ask low-level code
2085  * to enable I/O and memory. Wake up the device if it was suspended.
2086  * Beware, this function can fail.
2087  *
2088  * Note that the device is not actually enabled again if this function is
2089  * called repeatedly; we just increment the usage count.
2090  */
2091 int pci_enable_device(struct pci_dev *dev)
2092 {
2093 	return pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO);
2094 }
2095 EXPORT_SYMBOL(pci_enable_device);
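
/*
 * Illustrative sketch, not part of this file: the classic unmanaged
 * enable/disable pairing in a driver; the "foo" names are hypothetical.
 *
 *	static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 *	{
 *		int rc = pci_enable_device(pdev);	// can fail, check it
 *
 *		if (rc)
 *			return rc;
 *		// ... request regions, map BARs, register the device ...
 *		return 0;
 *	}
 *
 *	static void foo_remove(struct pci_dev *pdev)
 *	{
 *		// ... tear down in reverse order ...
 *		pci_disable_device(pdev);	// balances pci_enable_device()
 *	}
 */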
2096 
2097 /*
2098  * Managed PCI resources.  This manages device on/off, INTx/MSI/MSI-X
2099  * on/off and BAR regions.  pci_dev itself records MSI/MSI-X status, so
2100  * there's no need to track it separately.  pci_devres is initialized
2101  * when a device is enabled using managed PCI device enable interface.
2102  */
2103 struct pci_devres {
2104 	unsigned int enabled:1;
2105 	unsigned int pinned:1;
2106 	unsigned int orig_intx:1;
2107 	unsigned int restore_intx:1;
2108 	unsigned int mwi:1;
2109 	u32 region_mask;
2110 };
2111 
2112 static void pcim_release(struct device *gendev, void *res)
2113 {
2114 	struct pci_dev *dev = to_pci_dev(gendev);
2115 	struct pci_devres *this = res;
2116 	int i;
2117 
2118 	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
2119 		if (this->region_mask & (1 << i))
2120 			pci_release_region(dev, i);
2121 
2122 	if (this->mwi)
2123 		pci_clear_mwi(dev);
2124 
2125 	if (this->restore_intx)
2126 		pci_intx(dev, this->orig_intx);
2127 
2128 	if (this->enabled && !this->pinned)
2129 		pci_disable_device(dev);
2130 }
2131 
2132 static struct pci_devres *get_pci_dr(struct pci_dev *pdev)
2133 {
2134 	struct pci_devres *dr, *new_dr;
2135 
2136 	dr = devres_find(&pdev->dev, pcim_release, NULL, NULL);
2137 	if (dr)
2138 		return dr;
2139 
2140 	new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL);
2141 	if (!new_dr)
2142 		return NULL;
2143 	return devres_get(&pdev->dev, new_dr, NULL, NULL);
2144 }
2145 
2146 static struct pci_devres *find_pci_dr(struct pci_dev *pdev)
2147 {
2148 	if (pci_is_managed(pdev))
2149 		return devres_find(&pdev->dev, pcim_release, NULL, NULL);
2150 	return NULL;
2151 }
2152 
2153 /**
2154  * pcim_enable_device - Managed pci_enable_device()
2155  * @pdev: PCI device to be initialized
2156  *
2157  * Managed pci_enable_device().
2158  */
2159 int pcim_enable_device(struct pci_dev *pdev)
2160 {
2161 	struct pci_devres *dr;
2162 	int rc;
2163 
2164 	dr = get_pci_dr(pdev);
2165 	if (unlikely(!dr))
2166 		return -ENOMEM;
2167 	if (dr->enabled)
2168 		return 0;
2169 
2170 	rc = pci_enable_device(pdev);
2171 	if (!rc) {
2172 		pdev->is_managed = 1;
2173 		dr->enabled = 1;
2174 	}
2175 	return rc;
2176 }
2177 EXPORT_SYMBOL(pcim_enable_device);
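
/*
 * Illustrative sketch, not part of this file: with the managed variant no
 * remove-side pci_disable_device() is needed, because devres disables the
 * device when the driver is unbound; "foo" is hypothetical.
 *
 *	static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 *	{
 *		int rc = pcim_enable_device(pdev);
 *
 *		if (rc)
 *			return rc;
 *		// from here on, cleanup is handled by pcim_release()
 *		return 0;
 *	}
 */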
2178 
2179 /**
2180  * pcim_pin_device - Pin managed PCI device
2181  * @pdev: PCI device to pin
2182  *
2183  * Pin managed PCI device @pdev.  Pinned device won't be disabled on
2184  * driver detach.  @pdev must have been enabled with
2185  * pcim_enable_device().
2186  */
2187 void pcim_pin_device(struct pci_dev *pdev)
2188 {
2189 	struct pci_devres *dr;
2190 
2191 	dr = find_pci_dr(pdev);
2192 	WARN_ON(!dr || !dr->enabled);
2193 	if (dr)
2194 		dr->pinned = 1;
2195 }
2196 EXPORT_SYMBOL(pcim_pin_device);
2197 
2198 /**
2199  * pcibios_device_add - provide arch specific hooks when adding device dev
2200  * @dev: the PCI device being added
2201  *
2202  * Permits the platform to provide architecture specific functionality when
2203  * devices are added. This is the default implementation. Architecture
2204  * implementations can override this.
2205  */
2206 int __weak pcibios_device_add(struct pci_dev *dev)
2207 {
2208 	return 0;
2209 }
2210 
2211 /**
2212  * pcibios_release_device - provide arch specific hooks when releasing
2213  *			    device dev
2214  * @dev: the PCI device being released
2215  *
2216  * Permits the platform to provide architecture specific functionality when
2217  * devices are released. This is the default implementation. Architecture
2218  * implementations can override this.
2219  */
2220 void __weak pcibios_release_device(struct pci_dev *dev) {}
2221 
2222 /**
2223  * pcibios_disable_device - disable arch specific PCI resources for device dev
2224  * @dev: the PCI device to disable
2225  *
2226  * Disables architecture specific PCI resources for the device. This
2227  * is the default implementation. Architecture implementations can
2228  * override this.
2229  */
2230 void __weak pcibios_disable_device(struct pci_dev *dev) {}
2231 
2232 /**
2233  * pcibios_penalize_isa_irq - penalize an ISA IRQ
2234  * @irq: ISA IRQ to penalize
2235  * @active: IRQ active or not
2236  *
2237  * Permits the platform to provide architecture-specific functionality when
2238  * penalizing ISA IRQs. This is the default implementation. Architecture
2239  * implementations can override this.
2240  */
2241 void __weak pcibios_penalize_isa_irq(int irq, int active) {}
2242 
2243 static void do_pci_disable_device(struct pci_dev *dev)
2244 {
2245 	u16 pci_command;
2246 
2247 	pci_read_config_word(dev, PCI_COMMAND, &pci_command);
2248 	if (pci_command & PCI_COMMAND_MASTER) {
2249 		pci_command &= ~PCI_COMMAND_MASTER;
2250 		pci_write_config_word(dev, PCI_COMMAND, pci_command);
2251 	}
2252 
2253 	pcibios_disable_device(dev);
2254 }
2255 
2256 /**
2257  * pci_disable_enabled_device - Disable device without updating enable_cnt
2258  * @dev: PCI device to disable
2259  *
2260  * NOTE: This function is a backend of PCI power management routines and is
2261  * not supposed to be called by drivers.
2262  */
2263 void pci_disable_enabled_device(struct pci_dev *dev)
2264 {
2265 	if (pci_is_enabled(dev))
2266 		do_pci_disable_device(dev);
2267 }
2268 
2269 /**
2270  * pci_disable_device - Disable PCI device after use
2271  * @dev: PCI device to be disabled
2272  *
2273  * Signal to the system that the PCI device is no longer in use.  This
2274  * only involves disabling PCI bus-mastering, if active.
2275  *
2276  * Note we don't actually disable the device until all callers of
2277  * pci_enable_device() have called pci_disable_device().
2278  */
2279 void pci_disable_device(struct pci_dev *dev)
2280 {
2281 	struct pci_devres *dr;
2282 
2283 	dr = find_pci_dr(dev);
2284 	if (dr)
2285 		dr->enabled = 0;
2286 
2287 	dev_WARN_ONCE(&dev->dev, atomic_read(&dev->enable_cnt) <= 0,
2288 		      "disabling already-disabled device");
2289 
2290 	if (atomic_dec_return(&dev->enable_cnt) != 0)
2291 		return;
2292 
2293 	do_pci_disable_device(dev);
2294 
2295 	dev->is_busmaster = 0;
2296 }
2297 EXPORT_SYMBOL(pci_disable_device);
2298 
2299 /**
2300  * pcibios_set_pcie_reset_state - set reset state for device dev
2301  * @dev: the PCIe device to reset
2302  * @state: Reset state to enter into
2303  *
2304  * Set the PCIe reset state for the device. This is the default
2305  * implementation. Architecture implementations can override this.
2306  */
2307 int __weak pcibios_set_pcie_reset_state(struct pci_dev *dev,
2308 					enum pcie_reset_state state)
2309 {
2310 	return -EINVAL;
2311 }
2312 
2313 /**
2314  * pci_set_pcie_reset_state - set reset state for device dev
2315  * @dev: the PCIe device to reset
2316  * @state: Reset state to enter into
2317  *
2318  * Sets the PCI reset state for the device.
2319  */
2320 int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
2321 {
2322 	return pcibios_set_pcie_reset_state(dev, state);
2323 }
2324 EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);
2325 
2326 #ifdef CONFIG_PCIEAER
2327 void pcie_clear_device_status(struct pci_dev *dev)
2328 {
2329 	u16 sta;
2330 
2331 	pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &sta);
2332 	pcie_capability_write_word(dev, PCI_EXP_DEVSTA, sta);
2333 }
2334 #endif
2335 
2336 /**
2337  * pcie_clear_root_pme_status - Clear root port PME interrupt status.
2338  * @dev: PCIe root port or event collector.
2339  */
2340 void pcie_clear_root_pme_status(struct pci_dev *dev)
2341 {
2342 	pcie_capability_set_dword(dev, PCI_EXP_RTSTA, PCI_EXP_RTSTA_PME);
2343 }
2344 
2345 /**
2346  * pci_check_pme_status - Check if given device has generated PME.
2347  * @dev: Device to check.
2348  *
2349  * Check the PME status of the device and if set, clear it and clear PME enable
2350  * (if set).  Return 'true' if PME status and PME enable were both set or
2351  * 'false' otherwise.
2352  */
2353 bool pci_check_pme_status(struct pci_dev *dev)
2354 {
2355 	int pmcsr_pos;
2356 	u16 pmcsr;
2357 	bool ret = false;
2358 
2359 	if (!dev->pm_cap)
2360 		return false;
2361 
2362 	pmcsr_pos = dev->pm_cap + PCI_PM_CTRL;
2363 	pci_read_config_word(dev, pmcsr_pos, &pmcsr);
2364 	if (!(pmcsr & PCI_PM_CTRL_PME_STATUS))
2365 		return false;
2366 
2367 	/* Clear PME status. */
2368 	pmcsr |= PCI_PM_CTRL_PME_STATUS;
2369 	if (pmcsr & PCI_PM_CTRL_PME_ENABLE) {
2370 		/* Disable PME to avoid interrupt flood. */
2371 		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
2372 		ret = true;
2373 	}
2374 
2375 	pci_write_config_word(dev, pmcsr_pos, pmcsr);
2376 
2377 	return ret;
2378 }
2379 
2380 /**
2381  * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set.
2382  * @dev: Device to handle.
2383  * @pme_poll_reset: Whether or not to reset the device's pme_poll flag.
2384  *
2385  * Check if @dev has generated PME and queue a resume request for it in that
2386  * case.
2387  */
2388 static int pci_pme_wakeup(struct pci_dev *dev, void *pme_poll_reset)
2389 {
2390 	if (pme_poll_reset && dev->pme_poll)
2391 		dev->pme_poll = false;
2392 
2393 	if (pci_check_pme_status(dev)) {
2394 		pci_wakeup_event(dev);
2395 		pm_request_resume(&dev->dev);
2396 	}
2397 	return 0;
2398 }
2399 
2400 /**
2401  * pci_pme_wakeup_bus - Walk given bus and wake up devices on it, if necessary.
2402  * @bus: Top bus of the subtree to walk.
2403  */
2404 void pci_pme_wakeup_bus(struct pci_bus *bus)
2405 {
2406 	if (bus)
2407 		pci_walk_bus(bus, pci_pme_wakeup, (void *)true);
2408 }
2409 
2411 /**
2412  * pci_pme_capable - check the capability of PCI device to generate PME#
2413  * @dev: PCI device to handle.
2414  * @state: PCI state from which device will issue PME#.
2415  */
2416 bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
2417 {
2418 	if (!dev->pm_cap)
2419 		return false;
2420 
2421 	return !!(dev->pme_support & (1 << state));
2422 }
2423 EXPORT_SYMBOL(pci_pme_capable);
2424 
2425 static void pci_pme_list_scan(struct work_struct *work)
2426 {
2427 	struct pci_pme_device *pme_dev, *n;
2428 
2429 	mutex_lock(&pci_pme_list_mutex);
2430 	list_for_each_entry_safe(pme_dev, n, &pci_pme_list, list) {
2431 		struct pci_dev *pdev = pme_dev->dev;
2432 
2433 		if (pdev->pme_poll) {
2434 			struct pci_dev *bridge = pdev->bus->self;
2435 			struct device *dev = &pdev->dev;
2436 			int pm_status;
2437 
2438 			/*
2439 			 * If the bridge is in a low-power state, the
2440 			 * configuration space of subordinate devices
2441 			 * may not be accessible.
2442 			 */
2443 			if (bridge && bridge->current_state != PCI_D0)
2444 				continue;
2445 
2446 			/*
2447 			 * If the device is in a low power state it
2448 			 * should not be polled either.
2449 			 */
2450 			pm_status = pm_runtime_get_if_active(dev, true);
2451 			if (!pm_status)
2452 				continue;
2453 
2454 			if (pdev->current_state != PCI_D3cold)
2455 				pci_pme_wakeup(pdev, NULL);
2456 
2457 			if (pm_status > 0)
2458 				pm_runtime_put(dev);
2459 		} else {
2460 			list_del(&pme_dev->list);
2461 			kfree(pme_dev);
2462 		}
2463 	}
2464 	if (!list_empty(&pci_pme_list))
2465 		queue_delayed_work(system_freezable_wq, &pci_pme_work,
2466 				   msecs_to_jiffies(PME_TIMEOUT));
2467 	mutex_unlock(&pci_pme_list_mutex);
2468 }
2469 
2470 static void __pci_pme_active(struct pci_dev *dev, bool enable)
2471 {
2472 	u16 pmcsr;
2473 
2474 	if (!dev->pme_support)
2475 		return;
2476 
2477 	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
2478 	/* Clear PME_Status by writing 1 to it and enable PME# */
2479 	pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;
2480 	if (!enable)
2481 		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
2482 
2483 	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
2484 }
2485 
2486 /**
2487  * pci_pme_restore - Restore PME configuration after config space restore.
2488  * @dev: PCI device to update.
2489  */
2490 void pci_pme_restore(struct pci_dev *dev)
2491 {
2492 	u16 pmcsr;
2493 
2494 	if (!dev->pme_support)
2495 		return;
2496 
2497 	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
2498 	if (dev->wakeup_prepared) {
2499 		pmcsr |= PCI_PM_CTRL_PME_ENABLE;
2500 		pmcsr &= ~PCI_PM_CTRL_PME_STATUS;
2501 	} else {
2502 		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
2503 		pmcsr |= PCI_PM_CTRL_PME_STATUS;
2504 	}
2505 	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
2506 }
2507 
2508 /**
2509  * pci_pme_active - enable or disable PCI device's PME# function
2510  * @dev: PCI device to handle.
2511  * @enable: 'true' to enable PME# generation; 'false' to disable it.
2512  *
2513  * The caller must verify that the device is capable of generating PME# before
2514  * calling this function with @enable equal to 'true'.
2515  */
2516 void pci_pme_active(struct pci_dev *dev, bool enable)
2517 {
2518 	__pci_pme_active(dev, enable);
2519 
2520 	/*
2521 	 * PCI (as opposed to PCIe) PME requires that the device have
2522 	 * its PME# line hooked up correctly. Not all hardware vendors
2523 	 * do this, so the PME never gets delivered and the device
2524 	 * remains asleep. The easiest way around this is to
2525 	 * periodically walk the list of suspended devices and check
2526 	 * whether any have their PME flag set. The assumption is that
2527 	 * we'll wake up often enough anyway that this won't be a huge
2528 	 * hit, and the power savings from the devices will still be a
2529 	 * win.
2530 	 *
2531 	 * Although PCIe uses an in-band PME message instead of the PME#
2532 	 * line to report PME, PME does not work for some PCIe devices in
2533 	 * reality.  For example, there are devices that set their PME
2534 	 * status bits, but don't really bother to send a PME message;
2535 	 * there are PCI Express Root Ports that don't bother to
2536 	 * trigger interrupts when they receive PME messages from the
2537 	 * devices below.  So PME poll is used for PCIe devices too.
2538 	 */
2539 
2540 	if (dev->pme_poll) {
2541 		struct pci_pme_device *pme_dev;
2542 		if (enable) {
2543 			pme_dev = kmalloc(sizeof(struct pci_pme_device),
2544 					  GFP_KERNEL);
2545 			if (!pme_dev) {
2546 				pci_warn(dev, "can't enable PME#\n");
2547 				return;
2548 			}
2549 			pme_dev->dev = dev;
2550 			mutex_lock(&pci_pme_list_mutex);
2551 			list_add(&pme_dev->list, &pci_pme_list);
2552 			if (list_is_singular(&pci_pme_list))
2553 				queue_delayed_work(system_freezable_wq,
2554 						   &pci_pme_work,
2555 						   msecs_to_jiffies(PME_TIMEOUT));
2556 			mutex_unlock(&pci_pme_list_mutex);
2557 		} else {
2558 			mutex_lock(&pci_pme_list_mutex);
2559 			list_for_each_entry(pme_dev, &pci_pme_list, list) {
2560 				if (pme_dev->dev == dev) {
2561 					list_del(&pme_dev->list);
2562 					kfree(pme_dev);
2563 					break;
2564 				}
2565 			}
2566 			mutex_unlock(&pci_pme_list_mutex);
2567 		}
2568 	}
2569 
2570 	pci_dbg(dev, "PME# %s\n", enable ? "enabled" : "disabled");
2571 }
2572 EXPORT_SYMBOL(pci_pme_active);
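
/*
 * Illustrative sketch, not part of this file: per the requirement above,
 * check PME# capability for the target state before enabling it; "dev" is
 * a hypothetical device.
 *
 *	if (pci_pme_capable(dev, PCI_D3hot))
 *		pci_pme_active(dev, true);	// PME# will work from D3hot
 */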
2573 
2574 /**
2575  * __pci_enable_wake - enable PCI device as wakeup event source
2576  * @dev: PCI device affected
2577  * @state: PCI state from which device will issue wakeup events
2578  * @enable: True to enable event generation; false to disable
2579  *
2580  * This enables the device as a wakeup event source, or disables it.
2581  * When such events involves platform-specific hooks, those hooks are
2582  * When such events involve platform-specific hooks, those hooks are
2583  *
2584  * Devices with legacy power management (no standard PCI PM capabilities)
2585  * always require such platform hooks.
2586  *
2587  * RETURN VALUE:
2588  * 0 is returned on success.
2589  * -EINVAL is returned if the device is not supposed to wake up the system.
2590  * A platform-dependent error code is returned if both the platform and
2591  * the native mechanism fail to enable the generation of wake-up events.
2592  */
2593 static int __pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable)
2594 {
2595 	int ret = 0;
2596 
2597 	/*
2598 	 * Bridges that are not power-manageable directly only signal
2599 	 * wakeup on behalf of subordinate devices, which is set up
2600 	 * elsewhere, so skip them. However, bridges that are
2601 	 * power-manageable may signal wakeup for themselves (for example,
2602 	 * on a hotplug event) and they need to be covered here.
2603 	 */
2604 	if (!pci_power_manageable(dev))
2605 		return 0;
2606 
2607 	/* Don't do the same thing twice in a row for one device. */
2608 	if (!!enable == !!dev->wakeup_prepared)
2609 		return 0;
2610 
2611 	/*
2612 	 * According to "PCI System Architecture" 4th ed. by Tom Shanley & Don
2613 	 * Anderson we should be doing PME# wake enable followed by ACPI wake
2614 	 * enable.  To disable wake-up we call the platform first, for symmetry.
2615 	 */
2616 
2617 	if (enable) {
2618 		int error;
2619 
2620 		/*
2621 		 * Enable PME signaling if the device can signal PME from
2622 		 * D3cold regardless of whether or not it can signal PME from
2623 		 * the current target state, because that will allow it to
2624 		 * signal PME when the hierarchy above it goes into D3cold and
2625 		 * the device itself ends up in D3cold as a result of that.
2626 		 */
2627 		if (pci_pme_capable(dev, state) || pci_pme_capable(dev, PCI_D3cold))
2628 			pci_pme_active(dev, true);
2629 		else
2630 			ret = 1;
2631 		error = platform_pci_set_wakeup(dev, true);
2632 		if (ret)
2633 			ret = error;
2634 		if (!ret)
2635 			dev->wakeup_prepared = true;
2636 	} else {
2637 		platform_pci_set_wakeup(dev, false);
2638 		pci_pme_active(dev, false);
2639 		dev->wakeup_prepared = false;
2640 	}
2641 
2642 	return ret;
2643 }
2644 
2645 /**
2646  * pci_enable_wake - change wakeup settings for a PCI device
2647  * @pci_dev: Target device
2648  * @state: PCI state from which device will issue wakeup events
2649  * @enable: Whether or not to enable event generation
2650  *
2651  * If @enable is set, check device_may_wakeup() for the device before calling
2652  * __pci_enable_wake() for it.
2653  */
2654 int pci_enable_wake(struct pci_dev *pci_dev, pci_power_t state, bool enable)
2655 {
2656 	if (enable && !device_may_wakeup(&pci_dev->dev))
2657 		return -EINVAL;
2658 
2659 	return __pci_enable_wake(pci_dev, state, enable);
2660 }
2661 EXPORT_SYMBOL(pci_enable_wake);
2662 
2663 /**
2664  * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold
2665  * @dev: PCI device to prepare
2666  * @enable: True to enable wake-up event generation; false to disable
2667  *
2668  * Many drivers want the device to wake up the system from D3_hot or D3_cold
2669  * and this function allows them to set that up cleanly - pci_enable_wake()
2670  * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI
2671  * ordering constraints.
2672  *
2673  * This function only returns error code if the device is not allowed to wake
2674  * up the system from sleep or it is not capable of generating PME# from both
2675  * D3_hot and D3_cold and the platform is unable to enable wake-up power for it.
2676  */
2677 int pci_wake_from_d3(struct pci_dev *dev, bool enable)
2678 {
2679 	return pci_pme_capable(dev, PCI_D3cold) ?
2680 			pci_enable_wake(dev, PCI_D3cold, enable) :
2681 			pci_enable_wake(dev, PCI_D3hot, enable);
2682 }
2683 EXPORT_SYMBOL(pci_wake_from_d3);
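
/*
 * Illustrative sketch, not part of this file: a legacy-style suspend path
 * that arms wake-up and then enters a low-power state; "foo" is hypothetical
 * and error handling is abbreviated.
 *
 *	static int foo_suspend(struct pci_dev *pdev, pm_message_t state)
 *	{
 *		pci_save_state(pdev);
 *		pci_wake_from_d3(pdev, device_may_wakeup(&pdev->dev));
 *		pci_set_power_state(pdev, PCI_D3hot);
 *		return 0;
 *	}
 */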
2684 
2685 /**
2686  * pci_target_state - find an appropriate low power state for a given PCI dev
2687  * @dev: PCI device
2688  * @wakeup: Whether or not wakeup functionality will be enabled for the device.
2689  *
2690  * Use underlying platform code to find a supported low power state for @dev.
2691  * If the platform can't manage @dev, return the deepest state from which it
2692  * can generate wake events, based on any available PME info.
2693  */
2694 static pci_power_t pci_target_state(struct pci_dev *dev, bool wakeup)
2695 {
2696 	if (platform_pci_power_manageable(dev)) {
2697 		/*
2698 		 * Call the platform to find the target state for the device.
2699 		 */
2700 		pci_power_t state = platform_pci_choose_state(dev);
2701 
2702 		switch (state) {
2703 		case PCI_POWER_ERROR:
2704 		case PCI_UNKNOWN:
2705 			return PCI_D3hot;
2706 
2707 		case PCI_D1:
2708 		case PCI_D2:
2709 			if (pci_no_d1d2(dev))
2710 				return PCI_D3hot;
2711 		}
2712 
2713 		return state;
2714 	}
2715 
2716 	/*
2717 	 * If the device is in D3cold even though it's not power-manageable by
2718 	 * the platform, it may have been powered down by non-standard means.
2719 	 * Best to let it slumber.
2720 	 */
2721 	if (dev->current_state == PCI_D3cold)
2722 		return PCI_D3cold;
2723 	else if (!dev->pm_cap)
2724 		return PCI_D0;
2725 
2726 	if (wakeup && dev->pme_support) {
2727 		pci_power_t state = PCI_D3hot;
2728 
2729 		/*
2730 		 * Find the deepest state from which the device can generate
2731 		 * PME#.
2732 		 */
2733 		while (state && !(dev->pme_support & (1 << state)))
2734 			state--;
2735 
2736 		if (state)
2737 			return state;
2738 		else if (dev->pme_support & 1)
2739 			return PCI_D0;
2740 	}
2741 
2742 	return PCI_D3hot;
2743 }
2744 
2745 /**
2746  * pci_prepare_to_sleep - prepare PCI device for system-wide transition
2747  *			  into a sleep state
2748  * @dev: Device to handle.
2749  *
2750  * Choose the power state appropriate for the device depending on whether
2751  * it can wake up the system and/or is power manageable by the platform
2752  * (PCI_D3hot is the default) and put the device into that state.
2753  */
2754 int pci_prepare_to_sleep(struct pci_dev *dev)
2755 {
2756 	bool wakeup = device_may_wakeup(&dev->dev);
2757 	pci_power_t target_state = pci_target_state(dev, wakeup);
2758 	int error;
2759 
2760 	if (target_state == PCI_POWER_ERROR)
2761 		return -EIO;
2762 
2763 	pci_enable_wake(dev, target_state, wakeup);
2764 
2765 	error = pci_set_power_state(dev, target_state);
2766 
2767 	if (error)
2768 		pci_enable_wake(dev, target_state, false);
2769 
2770 	return error;
2771 }
2772 EXPORT_SYMBOL(pci_prepare_to_sleep);
2773 
2774 /**
2775  * pci_back_from_sleep - turn PCI device on during system-wide transition
2776  *			 into working state
2777  * @dev: Device to handle.
2778  *
2779  * Disable device's system wake-up capability and put it into D0.
2780  */
2781 int pci_back_from_sleep(struct pci_dev *dev)
2782 {
2783 	int ret = pci_set_power_state(dev, PCI_D0);
2784 
2785 	if (ret)
2786 		return ret;
2787 
2788 	pci_enable_wake(dev, PCI_D0, false);
2789 	return 0;
2790 }
2791 EXPORT_SYMBOL(pci_back_from_sleep);
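
/*
 * Illustrative sketch, not part of this file: the resume-side counterpart of
 * the suspend sketch above, doing by hand roughly what pci_back_from_sleep()
 * does plus a config space restore; "foo" is hypothetical.
 *
 *	static int foo_resume(struct pci_dev *pdev)
 *	{
 *		pci_set_power_state(pdev, PCI_D0);
 *		pci_restore_state(pdev);
 *		pci_enable_wake(pdev, PCI_D0, false);	// disarm wake-up
 *		return 0;
 *	}
 */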
2792 
2793 /**
2794  * pci_finish_runtime_suspend - Carry out PCI-specific part of runtime suspend.
2795  * @dev: PCI device being suspended.
2796  *
2797  * Prepare @dev to generate wake-up events at run time and put it into a low
2798  * power state.
2799  */
2800 int pci_finish_runtime_suspend(struct pci_dev *dev)
2801 {
2802 	pci_power_t target_state;
2803 	int error;
2804 
2805 	target_state = pci_target_state(dev, device_can_wakeup(&dev->dev));
2806 	if (target_state == PCI_POWER_ERROR)
2807 		return -EIO;
2808 
2809 	__pci_enable_wake(dev, target_state, pci_dev_run_wake(dev));
2810 
2811 	error = pci_set_power_state(dev, target_state);
2812 
2813 	if (error)
2814 		pci_enable_wake(dev, target_state, false);
2815 
2816 	return error;
2817 }
2818 
2819 /**
2820  * pci_dev_run_wake - Check if device can generate run-time wake-up events.
2821  * @dev: Device to check.
2822  *
2823  * Return true if the device itself is capable of generating wake-up events
2824  * (through the platform or using the native PCIe PME) or if the device supports
2825  * PME and one of its upstream bridges can generate wake-up events.
2826  */
2827 bool pci_dev_run_wake(struct pci_dev *dev)
2828 {
2829 	struct pci_bus *bus = dev->bus;
2830 
2831 	if (!dev->pme_support)
2832 		return false;
2833 
2834 	/* PME-capable in principle, but not from the target power state */
2835 	if (!pci_pme_capable(dev, pci_target_state(dev, true)))
2836 		return false;
2837 
2838 	if (device_can_wakeup(&dev->dev))
2839 		return true;
2840 
2841 	while (bus->parent) {
2842 		struct pci_dev *bridge = bus->self;
2843 
2844 		if (device_can_wakeup(&bridge->dev))
2845 			return true;
2846 
2847 		bus = bus->parent;
2848 	}
2849 
2850 	/* We have reached the root bus. */
2851 	if (bus->bridge)
2852 		return device_can_wakeup(bus->bridge);
2853 
2854 	return false;
2855 }
2856 EXPORT_SYMBOL_GPL(pci_dev_run_wake);
2857 
2858 /**
2859  * pci_dev_need_resume - Check if it is necessary to resume the device.
2860  * @pci_dev: Device to check.
2861  *
2862  * Return 'true' if the device is not runtime-suspended, if it has to be
2863  * reconfigured due to a difference in wakeup settings between system and
2864  * runtime suspend, or if its current power state is not suitable for the
2865  * upcoming (system-wide) transition.
2866  */
2867 bool pci_dev_need_resume(struct pci_dev *pci_dev)
2868 {
2869 	struct device *dev = &pci_dev->dev;
2870 	pci_power_t target_state;
2871 
2872 	if (!pm_runtime_suspended(dev) || platform_pci_need_resume(pci_dev))
2873 		return true;
2874 
2875 	target_state = pci_target_state(pci_dev, device_may_wakeup(dev));
2876 
2877 	/*
2878 	 * If the earlier platform check has not triggered, D3cold is just power
2879 	 * removal on top of D3hot, so no need to resume the device in that
2880 	 * case.
2881 	 */
2882 	return target_state != pci_dev->current_state &&
2883 		target_state != PCI_D3cold &&
2884 		pci_dev->current_state != PCI_D3hot;
2885 }
2886 
2887 /**
2888  * pci_dev_adjust_pme - Adjust PME setting for a suspended device.
2889  * @pci_dev: Device to check.
2890  *
2891  * If the device is suspended and it is not configured for system wakeup,
2892  * disable PME for it to prevent it from waking up the system unnecessarily.
2893  *
2894  * Note that if the device's power state is D3cold and the platform check in
2895  * pci_dev_need_resume() has not triggered, the device's configuration need not
2896  * be changed.
2897  */
2898 void pci_dev_adjust_pme(struct pci_dev *pci_dev)
2899 {
2900 	struct device *dev = &pci_dev->dev;
2901 
2902 	spin_lock_irq(&dev->power.lock);
2903 
2904 	if (pm_runtime_suspended(dev) && !device_may_wakeup(dev) &&
2905 	    pci_dev->current_state < PCI_D3cold)
2906 		__pci_pme_active(pci_dev, false);
2907 
2908 	spin_unlock_irq(&dev->power.lock);
2909 }
2910 
2911 /**
2912  * pci_dev_complete_resume - Finalize resume from system sleep for a device.
2913  * @pci_dev: Device to handle.
2914  *
2915  * If the device is runtime suspended and wakeup-capable, enable PME for it as
2916  * it might have been disabled during the prepare phase of system suspend if
2917  * the device was not configured for system wakeup.
2918  */
2919 void pci_dev_complete_resume(struct pci_dev *pci_dev)
2920 {
2921 	struct device *dev = &pci_dev->dev;
2922 
2923 	if (!pci_dev_run_wake(pci_dev))
2924 		return;
2925 
2926 	spin_lock_irq(&dev->power.lock);
2927 
2928 	if (pm_runtime_suspended(dev) && pci_dev->current_state < PCI_D3cold)
2929 		__pci_pme_active(pci_dev, true);
2930 
2931 	spin_unlock_irq(&dev->power.lock);
2932 }
2933 
2934 /**
2935  * pci_choose_state - Choose the power state of a PCI device.
2936  * @dev: Target PCI device.
2937  * @state: Target state for the whole system.
2938  *
2939  * Returns PCI power state suitable for @dev and @state.
2940  */
2941 pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
2942 {
2943 	if (state.event == PM_EVENT_ON)
2944 		return PCI_D0;
2945 
2946 	return pci_target_state(dev, false);
2947 }
2948 EXPORT_SYMBOL(pci_choose_state);
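
/*
 * Illustrative sketch, not part of this file: pci_choose_state() is
 * typically consumed in a legacy suspend handler like this, with "pdev" and
 * "state" being the handler's arguments:
 *
 *	pci_set_power_state(pdev, pci_choose_state(pdev, state));
 */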
2949 
2950 void pci_config_pm_runtime_get(struct pci_dev *pdev)
2951 {
2952 	struct device *dev = &pdev->dev;
2953 	struct device *parent = dev->parent;
2954 
2955 	if (parent)
2956 		pm_runtime_get_sync(parent);
2957 	pm_runtime_get_noresume(dev);
2958 	/*
2959 	 * pdev->current_state is set to PCI_D3cold during suspending,
2960 	 * so wait until suspending completes
2961 	 */
2962 	pm_runtime_barrier(dev);
2963 	/*
2964 	 * Only need to resume devices in D3cold, because config
2965 	 * registers are still accessible for devices suspended but
2966 	 * not in D3cold.
2967 	 */
2968 	if (pdev->current_state == PCI_D3cold)
2969 		pm_runtime_resume(dev);
2970 }
2971 
2972 void pci_config_pm_runtime_put(struct pci_dev *pdev)
2973 {
2974 	struct device *dev = &pdev->dev;
2975 	struct device *parent = dev->parent;
2976 
2977 	pm_runtime_put(dev);
2978 	if (parent)
2979 		pm_runtime_put_sync(parent);
2980 }
2981 
2982 static const struct dmi_system_id bridge_d3_blacklist[] = {
2983 #ifdef CONFIG_X86
2984 	{
2985 		/*
2986 		 * The Gigabyte X299 root port is not marked as hotplug
2987 		 * capable, which allows Linux to power manage it.  However,
2988 		 * this confuses the BIOS SMI handler, so don't power manage
2989 		 * root ports on that system.
2990 		 */
2991 		.ident = "X299 DESIGNARE EX-CF",
2992 		.matches = {
2993 			DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
2994 			DMI_MATCH(DMI_BOARD_NAME, "X299 DESIGNARE EX-CF"),
2995 		},
2996 	},
2997 	{
2998 		/*
2999 		 * Downstream device is not accessible after putting a root port
3000 		 * into D3cold and back into D0 on Elo Continental Z2 board
3001 		 */
3002 		.ident = "Elo Continental Z2",
3003 		.matches = {
3004 			DMI_MATCH(DMI_BOARD_VENDOR, "Elo Touch Solutions"),
3005 			DMI_MATCH(DMI_BOARD_NAME, "Geminilake"),
3006 			DMI_MATCH(DMI_BOARD_VERSION, "Continental Z2"),
3007 		},
3008 	},
3009 #endif
3010 	{ }
3011 };
3012 
3013 /**
3014  * pci_bridge_d3_possible - Is it possible to put the bridge into D3
3015  * @bridge: Bridge to check
3016  *
3017  * This function checks if it is possible to move the bridge to D3.
3018  * Currently we only allow D3 for recent enough PCIe ports and Thunderbolt.
3019  */
3020 bool pci_bridge_d3_possible(struct pci_dev *bridge)
3021 {
3022 	if (!pci_is_pcie(bridge))
3023 		return false;
3024 
3025 	switch (pci_pcie_type(bridge)) {
3026 	case PCI_EXP_TYPE_ROOT_PORT:
3027 	case PCI_EXP_TYPE_UPSTREAM:
3028 	case PCI_EXP_TYPE_DOWNSTREAM:
3029 		if (pci_bridge_d3_disable)
3030 			return false;
3031 
3032 		/*
3033 		 * Hotplug ports handled by firmware in System Management Mode
3034 		 * may not be put into D3 by the OS (Thunderbolt on non-Macs).
3035 		 */
3036 		if (bridge->is_hotplug_bridge && !pciehp_is_native(bridge))
3037 			return false;
3038 
3039 		if (pci_bridge_d3_force)
3040 			return true;
3041 
3042 		/* Even the oldest 2010 Thunderbolt controller supports D3. */
3043 		if (bridge->is_thunderbolt)
3044 			return true;
3045 
3046 		/* Platform might know better if the bridge supports D3 */
3047 		if (platform_pci_bridge_d3(bridge))
3048 			return true;
3049 
3050 		/*
3051 		 * Hotplug ports handled natively by the OS were not validated
3052 		 * by vendors for runtime D3 at least until 2018 because there
3053 		 * was no OS support.
3054 		 */
3055 		if (bridge->is_hotplug_bridge)
3056 			return false;
3057 
3058 		if (dmi_check_system(bridge_d3_blacklist))
3059 			return false;
3060 
3061 		/*
3062 		 * It should be safe to put PCIe ports from 2015 or newer
3063 		 * to D3.
3064 		 */
3065 		if (dmi_get_bios_year() >= 2015)
3066 			return true;
3067 		break;
3068 	}
3069 
3070 	return false;
3071 }
3072 
3073 static int pci_dev_check_d3cold(struct pci_dev *dev, void *data)
3074 {
3075 	bool *d3cold_ok = data;
3076 
3077 	if (/* The device needs to be allowed to go D3cold ... */
3078 	    dev->no_d3cold || !dev->d3cold_allowed ||
3079 
3080 	    /* ... and if it is wakeup capable to do so from D3cold. */
3081 	    (device_may_wakeup(&dev->dev) &&
3082 	     !pci_pme_capable(dev, PCI_D3cold)) ||
3083 
3084 	    /* If it is a bridge it must be allowed to go to D3. */
3085 	    !pci_power_manageable(dev))
3086 
3087 		*d3cold_ok = false;
3088 
3089 	return !*d3cold_ok;
3090 }
3091 
3092 /**
3093  * pci_bridge_d3_update - Update bridge D3 capabilities
3094  * @dev: PCI device which is changed
3095  *
3096  * Update upstream bridge PM capabilities depending on whether the device's
3097  * PM configuration was changed or the device is being removed.  The
3098  * change is also propagated upstream.
3099  */
3100 void pci_bridge_d3_update(struct pci_dev *dev)
3101 {
3102 	bool remove = !device_is_registered(&dev->dev);
3103 	struct pci_dev *bridge;
3104 	bool d3cold_ok = true;
3105 
3106 	bridge = pci_upstream_bridge(dev);
3107 	if (!bridge || !pci_bridge_d3_possible(bridge))
3108 		return;
3109 
3110 	/*
3111 	 * If D3 is currently allowed for the bridge, removing one of its
3112 	 * children won't change that.
3113 	 */
3114 	if (remove && bridge->bridge_d3)
3115 		return;
3116 
3117 	/*
3118 	 * If D3 is currently allowed for the bridge and a child is added or
3119 	 * changed, disallowance of D3 can only be caused by that child, so
3120 	 * we only need to check that single device, not any of its siblings.
3121 	 *
3122 	 * If D3 is currently not allowed for the bridge, checking the device
3123 	 * first may allow us to skip checking its siblings.
3124 	 */
3125 	if (!remove)
3126 		pci_dev_check_d3cold(dev, &d3cold_ok);
3127 
3128 	/*
3129 	 * If D3 is currently not allowed for the bridge, this may be caused
3130 	 * either by the device being changed/removed or any of its siblings,
3131 	 * so we need to go through all children to find out if one of them
3132 	 * continues to block D3.
3133 	 */
3134 	if (d3cold_ok && !bridge->bridge_d3)
3135 		pci_walk_bus(bridge->subordinate, pci_dev_check_d3cold,
3136 			     &d3cold_ok);
3137 
3138 	if (bridge->bridge_d3 != d3cold_ok) {
3139 		bridge->bridge_d3 = d3cold_ok;
3140 		/* Propagate change to upstream bridges */
3141 		pci_bridge_d3_update(bridge);
3142 	}
3143 }
3144 
3145 /**
3146  * pci_d3cold_enable - Enable D3cold for device
3147  * @dev: PCI device to handle
3148  *
3149  * This function can be used in drivers to enable D3cold for the device
3150  * they handle.  It also updates upstream PCI bridge PM capabilities
3151  * accordingly.
3152  */
3153 void pci_d3cold_enable(struct pci_dev *dev)
3154 {
3155 	if (dev->no_d3cold) {
3156 		dev->no_d3cold = false;
3157 		pci_bridge_d3_update(dev);
3158 	}
3159 }
3160 EXPORT_SYMBOL_GPL(pci_d3cold_enable);
3161 
3162 /**
3163  * pci_d3cold_disable - Disable D3cold for device
3164  * @dev: PCI device to handle
3165  *
3166  * This function can be used in drivers to disable D3cold for the device
3167  * they handle.  It also updates upstream PCI bridge PM capabilities
3168  * accordingly.
3169  */
3170 void pci_d3cold_disable(struct pci_dev *dev)
3171 {
3172 	if (!dev->no_d3cold) {
3173 		dev->no_d3cold = true;
3174 		pci_bridge_d3_update(dev);
3175 	}
3176 }
3177 EXPORT_SYMBOL_GPL(pci_d3cold_disable);
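
/*
 * Illustrative sketch, not part of this file: a driver whose device loses
 * context it cannot recover when power is removed may opt out of D3cold
 * from its probe path; "pdev" is hypothetical:
 *
 *	pci_d3cold_disable(pdev);	// limit runtime PM to D3hot
 */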
3178 
3179 /**
3180  * pci_pm_init - Initialize PM functions of given PCI device
3181  * @dev: PCI device to handle.
3182  */
3183 void pci_pm_init(struct pci_dev *dev)
3184 {
3185 	int pm;
3186 	u16 status;
3187 	u16 pmc;
3188 
3189 	pm_runtime_forbid(&dev->dev);
3190 	pm_runtime_set_active(&dev->dev);
3191 	pm_runtime_enable(&dev->dev);
3192 	device_enable_async_suspend(&dev->dev);
3193 	dev->wakeup_prepared = false;
3194 
3195 	dev->pm_cap = 0;
3196 	dev->pme_support = 0;
3197 
3198 	/* find PCI PM capability in list */
3199 	pm = pci_find_capability(dev, PCI_CAP_ID_PM);
3200 	if (!pm)
3201 		return;
3202 	/* Check device's ability to generate PME# */
3203 	pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);
3204 
3205 	if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
3206 		pci_err(dev, "unsupported PM cap regs version (%u)\n",
3207 			pmc & PCI_PM_CAP_VER_MASK);
3208 		return;
3209 	}
3210 
3211 	dev->pm_cap = pm;
3212 	dev->d3hot_delay = PCI_PM_D3HOT_WAIT;
3213 	dev->d3cold_delay = PCI_PM_D3COLD_WAIT;
3214 	dev->bridge_d3 = pci_bridge_d3_possible(dev);
3215 	dev->d3cold_allowed = true;
3216 
3217 	dev->d1_support = false;
3218 	dev->d2_support = false;
3219 	if (!pci_no_d1d2(dev)) {
3220 		if (pmc & PCI_PM_CAP_D1)
3221 			dev->d1_support = true;
3222 		if (pmc & PCI_PM_CAP_D2)
3223 			dev->d2_support = true;
3224 
3225 		if (dev->d1_support || dev->d2_support)
3226 			pci_info(dev, "supports%s%s\n",
3227 				   dev->d1_support ? " D1" : "",
3228 				   dev->d2_support ? " D2" : "");
3229 	}
3230 
3231 	pmc &= PCI_PM_CAP_PME_MASK;
3232 	if (pmc) {
3233 		pci_info(dev, "PME# supported from%s%s%s%s%s\n",
3234 			 (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
3235 			 (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
3236 			 (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
3237 			 (pmc & PCI_PM_CAP_PME_D3hot) ? " D3hot" : "",
3238 			 (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : "");
3239 		dev->pme_support = FIELD_GET(PCI_PM_CAP_PME_MASK, pmc);
3240 		dev->pme_poll = true;
3241 		/*
3242 		 * Make the device's PM flags reflect its wake-up capability, but
3243 		 * let user space enable it to wake up the system as needed.
3244 		 */
3245 		device_set_wakeup_capable(&dev->dev, true);
3246 		/* Disable the PME# generation functionality */
3247 		pci_pme_active(dev, false);
3248 	}
3249 
3250 	pci_read_config_word(dev, PCI_STATUS, &status);
3251 	if (status & PCI_STATUS_IMM_READY)
3252 		dev->imm_ready = 1;
3253 }
3254 
3255 static unsigned long pci_ea_flags(struct pci_dev *dev, u8 prop)
3256 {
3257 	unsigned long flags = IORESOURCE_PCI_FIXED | IORESOURCE_PCI_EA_BEI;
3258 
3259 	switch (prop) {
3260 	case PCI_EA_P_MEM:
3261 	case PCI_EA_P_VF_MEM:
3262 		flags |= IORESOURCE_MEM;
3263 		break;
3264 	case PCI_EA_P_MEM_PREFETCH:
3265 	case PCI_EA_P_VF_MEM_PREFETCH:
3266 		flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH;
3267 		break;
3268 	case PCI_EA_P_IO:
3269 		flags |= IORESOURCE_IO;
3270 		break;
3271 	default:
3272 		return 0;
3273 	}
3274 
3275 	return flags;
3276 }
3277 
3278 static struct resource *pci_ea_get_resource(struct pci_dev *dev, u8 bei,
3279 					    u8 prop)
3280 {
3281 	if (bei <= PCI_EA_BEI_BAR5 && prop <= PCI_EA_P_IO)
3282 		return &dev->resource[bei];
3283 #ifdef CONFIG_PCI_IOV
3284 	else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5 &&
3285 		 (prop == PCI_EA_P_VF_MEM || prop == PCI_EA_P_VF_MEM_PREFETCH))
3286 		return &dev->resource[PCI_IOV_RESOURCES +
3287 				      bei - PCI_EA_BEI_VF_BAR0];
3288 #endif
3289 	else if (bei == PCI_EA_BEI_ROM)
3290 		return &dev->resource[PCI_ROM_RESOURCE];
3291 	else
3292 		return NULL;
3293 }
3294 
3295 /* Read an Enhanced Allocation (EA) entry */
3296 static int pci_ea_read(struct pci_dev *dev, int offset)
3297 {
3298 	struct resource *res;
3299 	int ent_size, ent_offset = offset;
3300 	resource_size_t start, end;
3301 	unsigned long flags;
3302 	u32 dw0, bei, base, max_offset;
3303 	u8 prop;
3304 	bool support_64 = (sizeof(resource_size_t) >= 8);
3305 
3306 	pci_read_config_dword(dev, ent_offset, &dw0);
3307 	ent_offset += 4;
3308 
3309 	/* Entry size field indicates DWORDs after 1st */
3310 	ent_size = (FIELD_GET(PCI_EA_ES, dw0) + 1) << 2;
3311 
3312 	if (!(dw0 & PCI_EA_ENABLE)) /* Entry not enabled */
3313 		goto out;
3314 
3315 	bei = FIELD_GET(PCI_EA_BEI, dw0);
3316 	prop = FIELD_GET(PCI_EA_PP, dw0);
3317 
3318 	/*
3319 	 * If the Property is in the reserved range, try the Secondary
3320 	 * Property instead.
3321 	 */
3322 	if (prop > PCI_EA_P_BRIDGE_IO && prop < PCI_EA_P_MEM_RESERVED)
3323 		prop = FIELD_GET(PCI_EA_SP, dw0);
3324 	if (prop > PCI_EA_P_BRIDGE_IO)
3325 		goto out;
3326 
3327 	res = pci_ea_get_resource(dev, bei, prop);
3328 	if (!res) {
3329 		pci_err(dev, "Unsupported EA entry BEI: %u\n", bei);
3330 		goto out;
3331 	}
3332 
3333 	flags = pci_ea_flags(dev, prop);
3334 	if (!flags) {
3335 		pci_err(dev, "Unsupported EA properties: %#x\n", prop);
3336 		goto out;
3337 	}
3338 
3339 	/* Read Base */
3340 	pci_read_config_dword(dev, ent_offset, &base);
3341 	start = (base & PCI_EA_FIELD_MASK);
3342 	ent_offset += 4;
3343 
3344 	/* Read MaxOffset */
3345 	pci_read_config_dword(dev, ent_offset, &max_offset);
3346 	ent_offset += 4;
3347 
3348 	/* Read Base MSBs (if 64-bit entry) */
3349 	if (base & PCI_EA_IS_64) {
3350 		u32 base_upper;
3351 
3352 		pci_read_config_dword(dev, ent_offset, &base_upper);
3353 		ent_offset += 4;
3354 
3355 		flags |= IORESOURCE_MEM_64;
3356 
3357 		/* entry starts above 32-bit boundary, can't use */
3358 		if (!support_64 && base_upper)
3359 			goto out;
3360 
3361 		if (support_64)
3362 			start |= ((u64)base_upper << 32);
3363 	}
3364 
3365 	end = start + (max_offset | 0x03);
3366 
3367 	/* Read MaxOffset MSBs (if 64-bit entry) */
3368 	if (max_offset & PCI_EA_IS_64) {
3369 		u32 max_offset_upper;
3370 
3371 		pci_read_config_dword(dev, ent_offset, &max_offset_upper);
3372 		ent_offset += 4;
3373 
3374 		flags |= IORESOURCE_MEM_64;
3375 
3376 		/* entry too big, can't use */
3377 		if (!support_64 && max_offset_upper)
3378 			goto out;
3379 
3380 		if (support_64)
3381 			end += ((u64)max_offset_upper << 32);
3382 	}
3383 
3384 	if (end < start) {
3385 		pci_err(dev, "EA Entry crosses address boundary\n");
3386 		goto out;
3387 	}
3388 
3389 	if (ent_size != ent_offset - offset) {
3390 		pci_err(dev, "EA Entry Size (%d) does not match length read (%d)\n",
3391 			ent_size, ent_offset - offset);
3392 		goto out;
3393 	}
3394 
3395 	res->name = pci_name(dev);
3396 	res->start = start;
3397 	res->end = end;
3398 	res->flags = flags;
3399 
3400 	if (bei <= PCI_EA_BEI_BAR5)
3401 		pci_info(dev, "BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n",
3402 			   bei, res, prop);
3403 	else if (bei == PCI_EA_BEI_ROM)
3404 		pci_info(dev, "ROM: %pR (from Enhanced Allocation, properties %#02x)\n",
3405 			   res, prop);
3406 	else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5)
3407 		pci_info(dev, "VF BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n",
3408 			   bei - PCI_EA_BEI_VF_BAR0, res, prop);
3409 	else
3410 		pci_info(dev, "BEI %d res: %pR (from Enhanced Allocation, properties %#02x)\n",
3411 			   bei, res, prop);
3412 
3413 out:
3414 	return offset + ent_size;
3415 }
3416 
3417 /* Enhanced Allocation Initialization */
3418 void pci_ea_init(struct pci_dev *dev)
3419 {
3420 	int ea;
3421 	u8 num_ent;
3422 	int offset;
3423 	int i;
3424 
3425 	/* find PCI EA capability in list */
3426 	ea = pci_find_capability(dev, PCI_CAP_ID_EA);
3427 	if (!ea)
3428 		return;
3429 
3430 	/* determine the number of entries */
3431 	pci_bus_read_config_byte(dev->bus, dev->devfn, ea + PCI_EA_NUM_ENT,
3432 					&num_ent);
3433 	num_ent &= PCI_EA_NUM_ENT_MASK;
3434 
3435 	offset = ea + PCI_EA_FIRST_ENT;
3436 
3437 	/* Skip DWORD 2 for type 1 functions */
3438 	if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
3439 		offset += 4;
3440 
3441 	/* parse each EA entry */
3442 	for (i = 0; i < num_ent; ++i)
3443 		offset = pci_ea_read(dev, offset);
3444 }
3445 
3446 static void pci_add_saved_cap(struct pci_dev *pci_dev,
3447 	struct pci_cap_saved_state *new_cap)
3448 {
3449 	hlist_add_head(&new_cap->next, &pci_dev->saved_cap_space);
3450 }
3451 
3452 /**
3453  * _pci_add_cap_save_buffer - allocate buffer for saving given
3454  *			      capability registers
3455  * @dev: the PCI device
3456  * @cap: the capability to allocate the buffer for
3457  * @extended: Standard or Extended capability ID
3458  * @size: requested size of the buffer
3459  */
3460 static int _pci_add_cap_save_buffer(struct pci_dev *dev, u16 cap,
3461 				    bool extended, unsigned int size)
3462 {
3463 	int pos;
3464 	struct pci_cap_saved_state *save_state;
3465 
3466 	if (extended)
3467 		pos = pci_find_ext_capability(dev, cap);
3468 	else
3469 		pos = pci_find_capability(dev, cap);
3470 
3471 	if (!pos)
3472 		return 0;
3473 
3474 	save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL);
3475 	if (!save_state)
3476 		return -ENOMEM;
3477 
3478 	save_state->cap.cap_nr = cap;
3479 	save_state->cap.cap_extended = extended;
3480 	save_state->cap.size = size;
3481 	pci_add_saved_cap(dev, save_state);
3482 
3483 	return 0;
3484 }
3485 
3486 int pci_add_cap_save_buffer(struct pci_dev *dev, char cap, unsigned int size)
3487 {
3488 	return _pci_add_cap_save_buffer(dev, cap, false, size);
3489 }
3490 
3491 int pci_add_ext_cap_save_buffer(struct pci_dev *dev, u16 cap, unsigned int size)
3492 {
3493 	return _pci_add_cap_save_buffer(dev, cap, true, size);
3494 }
3495 
3496 /**
3497  * pci_allocate_cap_save_buffers - allocate buffers for saving capabilities
3498  * @dev: the PCI device
3499  */
3500 void pci_allocate_cap_save_buffers(struct pci_dev *dev)
3501 {
3502 	int error;
3503 
3504 	error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP,
3505 					PCI_EXP_SAVE_REGS * sizeof(u16));
3506 	if (error)
3507 		pci_err(dev, "unable to preallocate PCI Express save buffer\n");
3508 
3509 	error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16));
3510 	if (error)
3511 		pci_err(dev, "unable to preallocate PCI-X save buffer\n");
3512 
3513 	error = pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_LTR,
3514 					    2 * sizeof(u16));
3515 	if (error)
3516 		pci_err(dev, "unable to allocate suspend buffer for LTR\n");
3517 
3518 	pci_allocate_vc_save_buffers(dev);
3519 }
3520 
3521 void pci_free_cap_save_buffers(struct pci_dev *dev)
3522 {
3523 	struct pci_cap_saved_state *tmp;
3524 	struct hlist_node *n;
3525 
3526 	hlist_for_each_entry_safe(tmp, n, &dev->saved_cap_space, next)
3527 		kfree(tmp);
3528 }
3529 
3530 /**
3531  * pci_configure_ari - enable or disable ARI forwarding
3532  * @dev: the PCI device
3533  *
3534  * If @dev and its upstream bridge both support ARI, enable ARI in the
3535  * bridge.  Otherwise, disable ARI in the bridge.
3536  */
3537 void pci_configure_ari(struct pci_dev *dev)
3538 {
3539 	u32 cap;
3540 	struct pci_dev *bridge;
3541 
3542 	if (pcie_ari_disabled || !pci_is_pcie(dev) || dev->devfn)
3543 		return;
3544 
3545 	bridge = dev->bus->self;
3546 	if (!bridge)
3547 		return;
3548 
3549 	pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);
3550 	if (!(cap & PCI_EXP_DEVCAP2_ARI))
3551 		return;
3552 
3553 	if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI)) {
3554 		pcie_capability_set_word(bridge, PCI_EXP_DEVCTL2,
3555 					 PCI_EXP_DEVCTL2_ARI);
3556 		bridge->ari_enabled = 1;
3557 	} else {
3558 		pcie_capability_clear_word(bridge, PCI_EXP_DEVCTL2,
3559 					   PCI_EXP_DEVCTL2_ARI);
3560 		bridge->ari_enabled = 0;
3561 	}
3562 }
3563 
3564 static bool pci_acs_flags_enabled(struct pci_dev *pdev, u16 acs_flags)
3565 {
3566 	int pos;
3567 	u16 cap, ctrl;
3568 
3569 	pos = pdev->acs_cap;
3570 	if (!pos)
3571 		return false;
3572 
3573 	/*
3574 	 * Except for egress control, capabilities are either required
3575 	 * or only required if controllable.  Features missing from the
3576 	 * capability field can therefore be assumed to be hard-wired enabled.
3577 	 */
3578 	pci_read_config_word(pdev, pos + PCI_ACS_CAP, &cap);
3579 	acs_flags &= (cap | PCI_ACS_EC);
3580 
3581 	pci_read_config_word(pdev, pos + PCI_ACS_CTRL, &ctrl);
3582 	return (ctrl & acs_flags) == acs_flags;
3583 }
3584 
3585 /**
3586  * pci_acs_enabled - test ACS against required flags for a given device
3587  * @pdev: device to test
3588  * @acs_flags: required PCI ACS flags
3589  *
3590  * Return true if the device supports the provided flags.  Automatically
3591  * filters out flags that are not implemented on multifunction devices.
3592  *
3593  * Note that this interface checks the effective ACS capabilities of the
3594  * device rather than the actual capabilities.  For instance, most single
3595  * function endpoints are not required to support ACS because they have no
3596  * opportunity for peer-to-peer access.  We therefore return 'true'
3597  * regardless of whether the device exposes an ACS capability.  This makes
3598  * it much easier for callers of this function to ignore the actual type
3599  * or topology of the device when testing ACS support.
3600  */
3601 bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags)
3602 {
3603 	int ret;
3604 
3605 	ret = pci_dev_specific_acs_enabled(pdev, acs_flags);
3606 	if (ret >= 0)
3607 		return ret > 0;
3608 
3609 	/*
3610 	 * Conventional PCI and PCI-X devices never support ACS, either
3611 	 * effectively or actually.  The shared bus topology implies that
3612 	 * any device on the bus can receive or snoop DMA.
3613 	 */
3614 	if (!pci_is_pcie(pdev))
3615 		return false;
3616 
3617 	switch (pci_pcie_type(pdev)) {
3618 	/*
3619 	 * PCI/X-to-PCIe bridges are not specifically mentioned by the spec,
3620 	 * but since their primary interface is PCI/X, we conservatively
3621 	 * handle them as we would a non-PCIe device.
3622 	 */
3623 	case PCI_EXP_TYPE_PCIE_BRIDGE:
3624 	/*
3625 	 * PCIe 3.0, 6.12.1 excludes ACS on these devices.  "ACS is never
3626 	 * applicable... must never implement an ACS Extended Capability...".
3627 	 * This seems arbitrary, but we take a conservative interpretation
3628 	 * of this statement.
3629 	 */
3630 	case PCI_EXP_TYPE_PCI_BRIDGE:
3631 	case PCI_EXP_TYPE_RC_EC:
3632 		return false;
3633 	/*
3634 	 * PCIe 3.0, 6.12.1.1 specifies that downstream and root ports should
3635 	 * implement ACS in order to indicate their peer-to-peer capabilities,
3636 	 * regardless of whether they are single- or multi-function devices.
3637 	 */
3638 	case PCI_EXP_TYPE_DOWNSTREAM:
3639 	case PCI_EXP_TYPE_ROOT_PORT:
3640 		return pci_acs_flags_enabled(pdev, acs_flags);
3641 	/*
3642 	 * PCIe 3.0, 6.12.1.2 specifies ACS capabilities that should be
3643 	 * implemented by the remaining PCIe types to indicate peer-to-peer
3644 	 * capabilities, but only when they are part of a multifunction
3645 	 * device.  The footnote for section 6.12 indicates the specific
3646 	 * PCIe types included here.
3647 	 */
3648 	case PCI_EXP_TYPE_ENDPOINT:
3649 	case PCI_EXP_TYPE_UPSTREAM:
3650 	case PCI_EXP_TYPE_LEG_END:
3651 	case PCI_EXP_TYPE_RC_END:
3652 		if (!pdev->multifunction)
3653 			break;
3654 
3655 		return pci_acs_flags_enabled(pdev, acs_flags);
3656 	}
3657 
3658 	/*
3659 	 * PCIe 3.0, 6.12.1.3 specifies no ACS capabilities are applicable
3660 	 * to single function devices with the exception of downstream ports.
3661 	 */
3662 	return true;
3663 }
3664 
3665 /**
3666  * pci_acs_path_enabled - test ACS flags from start to end in a hierarchy
3667  * @start: starting downstream device
3668  * @end: ending upstream device or NULL to search to the root bus
3669  * @acs_flags: required flags
3670  *
3671  * Walk up a device tree from start to end testing PCI ACS support.  If
3672  * any step along the way does not support the required flags, return false.
3673  */
3674 bool pci_acs_path_enabled(struct pci_dev *start,
3675 			  struct pci_dev *end, u16 acs_flags)
3676 {
3677 	struct pci_dev *pdev, *parent = start;
3678 
3679 	do {
3680 		pdev = parent;
3681 
3682 		if (!pci_acs_enabled(pdev, acs_flags))
3683 			return false;
3684 
3685 		if (pci_is_root_bus(pdev->bus))
3686 			return (end == NULL);
3687 
3688 		parent = pdev->bus->self;
3689 	} while (pdev != end);
3690 
3691 	return true;
3692 }
3693 
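/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * IOMMU grouping code typically asks whether peer-to-peer DMA from a
 * device can bypass the IOMMU anywhere on the path to the root, using
 * the standard isolation flags.  "isolated" is a hypothetical local.
 *
 *	u16 flags = PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CF | PCI_ACS_UF;
 *	bool isolated = pci_acs_path_enabled(pdev, NULL, flags);
 */
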
3694 /**
3695  * pci_acs_init - Initialize ACS if hardware supports it
3696  * @dev: the PCI device
3697  */
3698 void pci_acs_init(struct pci_dev *dev)
3699 {
3700 	dev->acs_cap = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
3701 
3702 	/*
3703 	 * Attempt to enable ACS regardless of capability because some Root
3704 	 * Ports (e.g. those quirked with *_intel_pch_acs_*) do not have
3705 	 * the standard ACS capability but still support ACS via those
3706 	 * quirks.
3707 	 */
3708 	pci_enable_acs(dev);
3709 }
3710 
3711 /**
3712  * pci_rebar_find_pos - find position of resize ctrl reg for BAR
3713  * @pdev: PCI device
3714  * @bar: BAR to find
3715  *
3716  * Helper to find the position of the ctrl register for a BAR.
3717  * Returns -ENOTSUPP if resizable BARs are not supported at all.
3718  * Returns -ENOENT if no ctrl register for the BAR could be found.
3719  */
3720 static int pci_rebar_find_pos(struct pci_dev *pdev, int bar)
3721 {
3722 	unsigned int pos, nbars, i;
3723 	u32 ctrl;
3724 
3725 	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_REBAR);
3726 	if (!pos)
3727 		return -ENOTSUPP;
3728 
3729 	pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3730 	nbars = FIELD_GET(PCI_REBAR_CTRL_NBAR_MASK, ctrl);
3731 
3732 	for (i = 0; i < nbars; i++, pos += 8) {
3733 		int bar_idx;
3734 
3735 		pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3736 		bar_idx = FIELD_GET(PCI_REBAR_CTRL_BAR_IDX, ctrl);
3737 		if (bar_idx == bar)
3738 			return pos;
3739 	}
3740 
3741 	return -ENOENT;
3742 }
3743 
3744 /**
3745  * pci_rebar_get_possible_sizes - get possible sizes for BAR
3746  * @pdev: PCI device
3747  * @bar: BAR to query
3748  *
3749  * Get the possible sizes of a resizable BAR as a bitmask defined in the
3750  * spec (bit 0=1MB, bit 19=512GB).  Returns 0 if the BAR isn't resizable.
3751  */
3752 u32 pci_rebar_get_possible_sizes(struct pci_dev *pdev, int bar)
3753 {
3754 	int pos;
3755 	u32 cap;
3756 
3757 	pos = pci_rebar_find_pos(pdev, bar);
3758 	if (pos < 0)
3759 		return 0;
3760 
3761 	pci_read_config_dword(pdev, pos + PCI_REBAR_CAP, &cap);
3762 	cap = FIELD_GET(PCI_REBAR_CAP_SIZES, cap);
3763 
3764 	/* Sapphire RX 5600 XT Pulse has an invalid cap dword for BAR 0 */
3765 	if (pdev->vendor == PCI_VENDOR_ID_ATI && pdev->device == 0x731f &&
3766 	    bar == 0 && cap == 0x700)
3767 		return 0x3f00;
3768 
3769 	return cap;
3770 }
3771 EXPORT_SYMBOL(pci_rebar_get_possible_sizes);
3772 
3773 /**
3774  * pci_rebar_get_current_size - get the current size of a BAR
3775  * @pdev: PCI device
3776  * @bar: BAR to query
3777  *
3778  * Read the size of a BAR from the resizable BAR config.
3779  * Returns the size if found, or a negative error code.
3780  */
3781 int pci_rebar_get_current_size(struct pci_dev *pdev, int bar)
3782 {
3783 	int pos;
3784 	u32 ctrl;
3785 
3786 	pos = pci_rebar_find_pos(pdev, bar);
3787 	if (pos < 0)
3788 		return pos;
3789 
3790 	pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3791 	return FIELD_GET(PCI_REBAR_CTRL_BAR_SIZE, ctrl);
3792 }
3793 
3794 /**
3795  * pci_rebar_set_size - set a new size for a BAR
3796  * @pdev: PCI device
3797  * @bar: BAR to set size to
3798  * @size: new size as defined in the spec (0=1MB, 19=512GB)
3799  *
3800  * Set the new size of a BAR as defined in the spec.
3801  * Returns zero if resizing was successful, error code otherwise.
3802  */
3803 int pci_rebar_set_size(struct pci_dev *pdev, int bar, int size)
3804 {
3805 	int pos;
3806 	u32 ctrl;
3807 
3808 	pos = pci_rebar_find_pos(pdev, bar);
3809 	if (pos < 0)
3810 		return pos;
3811 
3812 	pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3813 	ctrl &= ~PCI_REBAR_CTRL_BAR_SIZE;
3814 	ctrl |= FIELD_PREP(PCI_REBAR_CTRL_BAR_SIZE, size);
3815 	pci_write_config_dword(pdev, pos + PCI_REBAR_CTRL, ctrl);
3816 	return 0;
3817 }
3818 
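/*
 * A minimal resize sketch (hypothetical driver code): pick the largest
 * size supported for BAR 0 and apply it via pci_resize_resource()
 * (defined in setup-res.c), which reallocates the BAR and calls
 * pci_rebar_set_size().  Size bit n corresponds to 2^(n + 20) bytes.
 *
 *	u32 sizes = pci_rebar_get_possible_sizes(pdev, 0);
 *
 *	if (sizes)
 *		err = pci_resize_resource(pdev, 0, __fls(sizes));
 */
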
3819 /**
3820  * pci_enable_atomic_ops_to_root - enable AtomicOp requests to root port
3821  * @dev: the PCI device
3822  * @cap_mask: mask of desired AtomicOp sizes, including one or more of:
3823  *	PCI_EXP_DEVCAP2_ATOMIC_COMP32
3824  *	PCI_EXP_DEVCAP2_ATOMIC_COMP64
3825  *	PCI_EXP_DEVCAP2_ATOMIC_COMP128
3826  *
3827  * Return 0 if all upstream bridges support AtomicOp routing, egress
3828  * blocking is disabled on all upstream ports, and the root port supports
3829  * the requested completion capabilities (32-bit, 64-bit and/or 128-bit
3830  * AtomicOp completion), or a negative value otherwise.
3831  */
3832 int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask)
3833 {
3834 	struct pci_bus *bus = dev->bus;
3835 	struct pci_dev *bridge;
3836 	u32 cap, ctl2;
3837 
3838 	/*
3839 	 * Per PCIe r5.0, sec 9.3.5.10, the AtomicOp Requester Enable bit
3840 	 * in Device Control 2 is reserved in VFs and the PF value applies
3841 	 * to all associated VFs.
3842 	 */
3843 	if (dev->is_virtfn)
3844 		return -EINVAL;
3845 
3846 	if (!pci_is_pcie(dev))
3847 		return -EINVAL;
3848 
3849 	/*
3850 	 * Per PCIe r4.0, sec 6.15, endpoints and root ports may be
3851 	 * AtomicOp requesters.  For now, we only support endpoints as
3852 	 * requesters and root ports as completers.  No endpoints as
3853 	 * completers, and no peer-to-peer.
3854 	 */
3855 
3856 	switch (pci_pcie_type(dev)) {
3857 	case PCI_EXP_TYPE_ENDPOINT:
3858 	case PCI_EXP_TYPE_LEG_END:
3859 	case PCI_EXP_TYPE_RC_END:
3860 		break;
3861 	default:
3862 		return -EINVAL;
3863 	}
3864 
3865 	while (bus->parent) {
3866 		bridge = bus->self;
3867 
3868 		pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);
3869 
3870 		switch (pci_pcie_type(bridge)) {
3871 		/* Ensure switch ports support AtomicOp routing */
3872 		case PCI_EXP_TYPE_UPSTREAM:
3873 		case PCI_EXP_TYPE_DOWNSTREAM:
3874 			if (!(cap & PCI_EXP_DEVCAP2_ATOMIC_ROUTE))
3875 				return -EINVAL;
3876 			break;
3877 
3878 		/* Ensure root port supports all the sizes we care about */
3879 		case PCI_EXP_TYPE_ROOT_PORT:
3880 			if ((cap & cap_mask) != cap_mask)
3881 				return -EINVAL;
3882 			break;
3883 		}
3884 
3885 		/* Ensure upstream ports don't block AtomicOps on egress */
3886 		if (pci_pcie_type(bridge) == PCI_EXP_TYPE_UPSTREAM) {
3887 			pcie_capability_read_dword(bridge, PCI_EXP_DEVCTL2,
3888 						   &ctl2);
3889 			if (ctl2 & PCI_EXP_DEVCTL2_ATOMIC_EGRESS_BLOCK)
3890 				return -EINVAL;
3891 		}
3892 
3893 		bus = bus->parent;
3894 	}
3895 
3896 	pcie_capability_set_word(dev, PCI_EXP_DEVCTL2,
3897 				 PCI_EXP_DEVCTL2_ATOMIC_REQ);
3898 	return 0;
3899 }
3900 EXPORT_SYMBOL(pci_enable_atomic_ops_to_root);
3901 
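/*
 * A minimal usage sketch (hypothetical driver code): an endpoint driver
 * that issues 64-bit AtomicOps to host memory enables the requester only
 * if the whole path to the Root Port supports it, and otherwise falls
 * back to non-atomic transactions.
 *
 *	err = pci_enable_atomic_ops_to_root(pdev,
 *					    PCI_EXP_DEVCAP2_ATOMIC_COMP64);
 *	if (err)
 *		return err;
 */
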
3902 /**
3903  * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
3904  * @dev: the PCI device
3905  * @pin: the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD)
3906  *
3907  * Perform INTx swizzling for a device behind one level of bridge.  This is
3908  * required by section 9.1 of the PCI-to-PCI bridge specification for devices
3909  * behind bridges on add-in cards.  For devices with ARI enabled, the slot
3910  * number is always 0 (see the Implementation Note in section 2.2.8.1 of
3911  * the PCI Express Base Specification, Revision 2.1).
3912  */
3913 u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin)
3914 {
3915 	int slot;
3916 
3917 	if (pci_ari_enabled(dev->bus))
3918 		slot = 0;
3919 	else
3920 		slot = PCI_SLOT(dev->devfn);
3921 
3922 	return (((pin - 1) + slot) % 4) + 1;
3923 }
3924 
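/*
 * A worked example of the swizzle above (hypothetical values): a device
 * in slot 3 using INTB (pin 2) behind one bridge:
 *
 *	pci_swizzle_interrupt_pin(dev, 2) == ((2 - 1) + 3) % 4 + 1 == 1
 *
 * i.e. the bridge's primary side sees INTA.  Applying this once per
 * bridge level, as pci_common_swizzle() does below, yields the pin as
 * seen at the root bus.
 */
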
3925 int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
3926 {
3927 	u8 pin;
3928 
3929 	pin = dev->pin;
3930 	if (!pin)
3931 		return -1;
3932 
3933 	while (!pci_is_root_bus(dev->bus)) {
3934 		pin = pci_swizzle_interrupt_pin(dev, pin);
3935 		dev = dev->bus->self;
3936 	}
3937 	*bridge = dev;
3938 	return pin;
3939 }
3940 
3941 /**
3942  * pci_common_swizzle - swizzle INTx all the way to root bridge
3943  * @dev: the PCI device
3944  * @pinp: pointer to the INTx pin value (1=INTA, 2=INTB, 3=INTC, 4=INTD)
3945  *
3946  * Perform INTx swizzling for a device.  This traverses through all PCI-to-PCI
3947  * bridges all the way up to a PCI root bus.
3948  */
3949 u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
3950 {
3951 	u8 pin = *pinp;
3952 
3953 	while (!pci_is_root_bus(dev->bus)) {
3954 		pin = pci_swizzle_interrupt_pin(dev, pin);
3955 		dev = dev->bus->self;
3956 	}
3957 	*pinp = pin;
3958 	return PCI_SLOT(dev->devfn);
3959 }
3960 EXPORT_SYMBOL_GPL(pci_common_swizzle);
3961 
3962 /**
3963  * pci_release_region - Release a PCI BAR
3964  * @pdev: PCI device whose resources were previously reserved by
3965  *	  pci_request_region()
3966  * @bar: BAR to release
3967  *
3968  * Releases the PCI I/O and memory resources previously reserved by a
3969  * successful call to pci_request_region().  Call this function only
3970  * after all use of the PCI regions has ceased.
3971  */
3972 void pci_release_region(struct pci_dev *pdev, int bar)
3973 {
3974 	struct pci_devres *dr;
3975 
3976 	if (pci_resource_len(pdev, bar) == 0)
3977 		return;
3978 	if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
3979 		release_region(pci_resource_start(pdev, bar),
3980 				pci_resource_len(pdev, bar));
3981 	else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
3982 		release_mem_region(pci_resource_start(pdev, bar),
3983 				pci_resource_len(pdev, bar));
3984 
3985 	dr = find_pci_dr(pdev);
3986 	if (dr)
3987 		dr->region_mask &= ~(1 << bar);
3988 }
3989 EXPORT_SYMBOL(pci_release_region);
3990 
3991 /**
3992  * __pci_request_region - Reserve PCI I/O and memory resource
3993  * @pdev: PCI device whose resources are to be reserved
3994  * @bar: BAR to be reserved
3995  * @res_name: Name to be associated with resource.
3996  * @exclusive: whether the region access is exclusive or not
3997  *
3998  * Mark the PCI region associated with PCI device @pdev BAR @bar as
3999  * being reserved by owner @res_name.  Do not access any
4000  * address inside the PCI regions unless this call returns
4001  * successfully.
4002  *
4003  * If @exclusive is set, then the region is marked so that userspace
4004  * is explicitly not allowed to map the resource via /dev/mem or
4005  * sysfs MMIO access.
4006  *
4007  * Returns 0 on success, or -EBUSY on error.  A warning
4008  * message is also printed on failure.
4009  */
4010 static int __pci_request_region(struct pci_dev *pdev, int bar,
4011 				const char *res_name, int exclusive)
4012 {
4013 	struct pci_devres *dr;
4014 
4015 	if (pci_resource_len(pdev, bar) == 0)
4016 		return 0;
4017 
4018 	if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
4019 		if (!request_region(pci_resource_start(pdev, bar),
4020 			    pci_resource_len(pdev, bar), res_name))
4021 			goto err_out;
4022 	} else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
4023 		if (!__request_mem_region(pci_resource_start(pdev, bar),
4024 					pci_resource_len(pdev, bar), res_name,
4025 					exclusive))
4026 			goto err_out;
4027 	}
4028 
4029 	dr = find_pci_dr(pdev);
4030 	if (dr)
4031 		dr->region_mask |= 1 << bar;
4032 
4033 	return 0;
4034 
4035 err_out:
4036 	pci_warn(pdev, "BAR %d: can't reserve %pR\n", bar,
4037 		 &pdev->resource[bar]);
4038 	return -EBUSY;
4039 }
4040 
4041 /**
4042  * pci_request_region - Reserve PCI I/O and memory resource
4043  * @pdev: PCI device whose resources are to be reserved
4044  * @bar: BAR to be reserved
4045  * @res_name: Name to be associated with resource
4046  *
4047  * Mark the PCI region associated with PCI device @pdev BAR @bar as
4048  * being reserved by owner @res_name.  Do not access any
4049  * address inside the PCI regions unless this call returns
4050  * successfully.
4051  *
4052  * Returns 0 on success, or -EBUSY on error.  A warning
4053  * message is also printed on failure.
4054  */
4055 int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
4056 {
4057 	return __pci_request_region(pdev, bar, res_name, 0);
4058 }
4059 EXPORT_SYMBOL(pci_request_region);
4060 
4061 /**
4062  * pci_release_selected_regions - Release selected PCI I/O and memory resources
4063  * @pdev: PCI device whose resources were previously reserved
4064  * @bars: Bitmask of BARs to be released
4065  *
4066  * Release selected PCI I/O and memory resources previously reserved.
4067  * Call this function only after all use of the PCI regions has ceased.
4068  */
4069 void pci_release_selected_regions(struct pci_dev *pdev, int bars)
4070 {
4071 	int i;
4072 
4073 	for (i = 0; i < PCI_STD_NUM_BARS; i++)
4074 		if (bars & (1 << i))
4075 			pci_release_region(pdev, i);
4076 }
4077 EXPORT_SYMBOL(pci_release_selected_regions);
4078 
4079 static int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
4080 					  const char *res_name, int excl)
4081 {
4082 	int i;
4083 
4084 	for (i = 0; i < PCI_STD_NUM_BARS; i++)
4085 		if (bars & (1 << i))
4086 			if (__pci_request_region(pdev, i, res_name, excl))
4087 				goto err_out;
4088 	return 0;
4089 
4090 err_out:
4091 	while (--i >= 0)
4092 		if (bars & (1 << i))
4093 			pci_release_region(pdev, i);
4094 
4095 	return -EBUSY;
4096 }
4097 
4099 /**
4100  * pci_request_selected_regions - Reserve selected PCI I/O and memory resources
4101  * @pdev: PCI device whose resources are to be reserved
4102  * @bars: Bitmask of BARs to be requested
4103  * @res_name: Name to be associated with resource
4104  */
4105 int pci_request_selected_regions(struct pci_dev *pdev, int bars,
4106 				 const char *res_name)
4107 {
4108 	return __pci_request_selected_regions(pdev, bars, res_name, 0);
4109 }
4110 EXPORT_SYMBOL(pci_request_selected_regions);
4111 
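/*
 * A minimal usage sketch (hypothetical driver "foo"): claim only the
 * BARs the driver actually uses, e.g. BAR 0 and BAR 2, leaving the
 * others free for another owner.
 *
 *	err = pci_request_selected_regions(pdev, BIT(0) | BIT(2), "foo");
 */
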
4112 int pci_request_selected_regions_exclusive(struct pci_dev *pdev, int bars,
4113 					   const char *res_name)
4114 {
4115 	return __pci_request_selected_regions(pdev, bars, res_name,
4116 			IORESOURCE_EXCLUSIVE);
4117 }
4118 EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
4119 
4120 /**
4121  * pci_release_regions - Release reserved PCI I/O and memory resources
4122  * @pdev: PCI device whose resources were previously reserved by
4123  *	  pci_request_regions()
4124  *
4125  * Releases all PCI I/O and memory resources previously reserved by a
4126  * successful call to pci_request_regions().  Call this function only
4127  * after all use of the PCI regions has ceased.
4128  */
4130 void pci_release_regions(struct pci_dev *pdev)
4131 {
4132 	pci_release_selected_regions(pdev, (1 << PCI_STD_NUM_BARS) - 1);
4133 }
4134 EXPORT_SYMBOL(pci_release_regions);
4135 
4136 /**
4137  * pci_request_regions - Reserve PCI I/O and memory resources
4138  * @pdev: PCI device whose resources are to be reserved
4139  * @res_name: Name to be associated with resource.
4140  *
4141  * Mark all PCI regions associated with PCI device @pdev as
4142  * being reserved by owner @res_name.  Do not access any
4143  * address inside the PCI regions unless this call returns
4144  * successfully.
4145  *
4146  * Returns 0 on success, or -EBUSY on error.  A warning
4147  * message is also printed on failure.
4148  */
4149 int pci_request_regions(struct pci_dev *pdev, const char *res_name)
4150 {
4151 	return pci_request_selected_regions(pdev,
4152 			((1 << PCI_STD_NUM_BARS) - 1), res_name);
4153 }
4154 EXPORT_SYMBOL(pci_request_regions);
4155 
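/*
 * A typical probe-time sketch (hypothetical driver "foo", error labels
 * elided): enable the device, claim all BARs, then enable bus mastering
 * with pci_set_master() below before starting DMA.
 *
 *	err = pci_enable_device(pdev);
 *	if (err)
 *		return err;
 *
 *	err = pci_request_regions(pdev, "foo");
 *	if (err)
 *		goto err_disable_device;
 *
 *	pci_set_master(pdev);
 */
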
4156 /**
4157  * pci_request_regions_exclusive - Reserve PCI I/O and memory resources
4158  * @pdev: PCI device whose resources are to be reserved
4159  * @res_name: Name to be associated with resource.
4160  *
4161  * Mark all PCI regions associated with PCI device @pdev as being reserved
4162  * by owner @res_name.  Do not access any address inside the PCI regions
4163  * unless this call returns successfully.
4164  *
4165  * pci_request_regions_exclusive() will mark the region so that /dev/mem
4166  * and the sysfs MMIO access will not be allowed.
4167  *
4168  * Returns 0 on success, or -EBUSY on error.  A warning message is also
4169  * printed on failure.
4170  */
4171 int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
4172 {
4173 	return pci_request_selected_regions_exclusive(pdev,
4174 				((1 << PCI_STD_NUM_BARS) - 1), res_name);
4175 }
4176 EXPORT_SYMBOL(pci_request_regions_exclusive);
4177 
4178 /*
4179  * Record the PCI IO range (expressed as CPU physical address + size).
4180  * Return a negative value if an error has occurred, zero otherwise
4181  * Return a negative value if an error has occurred, zero otherwise.
4182 int pci_register_io_range(struct fwnode_handle *fwnode, phys_addr_t addr,
4183 			resource_size_t	size)
4184 {
4185 	int ret = 0;
4186 #ifdef PCI_IOBASE
4187 	struct logic_pio_hwaddr *range;
4188 
4189 	if (!size || addr + size < addr)
4190 		return -EINVAL;
4191 
4192 	range = kzalloc(sizeof(*range), GFP_ATOMIC);
4193 	if (!range)
4194 		return -ENOMEM;
4195 
4196 	range->fwnode = fwnode;
4197 	range->size = size;
4198 	range->hw_start = addr;
4199 	range->flags = LOGIC_PIO_CPU_MMIO;
4200 
4201 	ret = logic_pio_register_range(range);
4202 	if (ret)
4203 		kfree(range);
4204 
4205 	/* Ignore duplicates due to deferred probing */
4206 	if (ret == -EEXIST)
4207 		ret = 0;
4208 #endif
4209 
4210 	return ret;
4211 }
4212 
4213 phys_addr_t pci_pio_to_address(unsigned long pio)
4214 {
4215 #ifdef PCI_IOBASE
4216 	if (pio < MMIO_UPPER_LIMIT)
4217 		return logic_pio_to_hwaddr(pio);
4218 #endif
4219 
4220 	return (phys_addr_t) OF_BAD_ADDR;
4221 }
4222 EXPORT_SYMBOL_GPL(pci_pio_to_address);
4223 
4224 unsigned long __weak pci_address_to_pio(phys_addr_t address)
4225 {
4226 #ifdef PCI_IOBASE
4227 	return logic_pio_trans_cpuaddr(address);
4228 #else
4229 	if (address > IO_SPACE_LIMIT)
4230 		return (unsigned long)-1;
4231 
4232 	return (unsigned long) address;
4233 #endif
4234 }
4235 
4236 /**
4237  * pci_remap_iospace - Remap the memory mapped I/O space
4238  * @res: Resource describing the I/O space
4239  * @phys_addr: physical address of range to be mapped
4240  *
4241  * Remap the memory mapped I/O space described by the @res and the CPU
4242  * physical address @phys_addr into virtual address space.  Only
4243  * architectures that have memory mapped IO functions defined (and the
4244  * PCI_IOBASE value defined) should call this function.
4245  */
4246 #ifndef pci_remap_iospace
4247 int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
4248 {
4249 #if defined(PCI_IOBASE) && defined(CONFIG_MMU)
4250 	unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
4251 
4252 	if (!(res->flags & IORESOURCE_IO))
4253 		return -EINVAL;
4254 
4255 	if (res->end > IO_SPACE_LIMIT)
4256 		return -EINVAL;
4257 
4258 	return ioremap_page_range(vaddr, vaddr + resource_size(res), phys_addr,
4259 				  pgprot_device(PAGE_KERNEL));
4260 #else
4261 	/*
4262 	 * This architecture does not have memory mapped I/O space,
4263 	 * so this function should never be called
4264 	 */
4265 	WARN_ONCE(1, "This architecture does not support memory mapped I/O\n");
4266 	return -ENODEV;
4267 #endif
4268 }
4269 EXPORT_SYMBOL(pci_remap_iospace);
4270 #endif
4271 
4272 /**
4273  * pci_unmap_iospace - Unmap the memory mapped I/O space
4274  * @res: resource to be unmapped
4275  *
4276  * Unmap the CPU virtual address @res from virtual address space.  Only
4277  * architectures that have memory mapped IO functions defined (and the
4278  * PCI_IOBASE value defined) should call this function.
4279  */
4280 void pci_unmap_iospace(struct resource *res)
4281 {
4282 #if defined(PCI_IOBASE) && defined(CONFIG_MMU)
4283 	unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
4284 
4285 	vunmap_range(vaddr, vaddr + resource_size(res));
4286 #endif
4287 }
4288 EXPORT_SYMBOL(pci_unmap_iospace);
4289 
4290 static void devm_pci_unmap_iospace(struct device *dev, void *ptr)
4291 {
4292 	struct resource **res = ptr;
4293 
4294 	pci_unmap_iospace(*res);
4295 }
4296 
4297 /**
4298  * devm_pci_remap_iospace - Managed pci_remap_iospace()
4299  * @dev: Generic device to remap IO address for
4300  * @res: Resource describing the I/O space
4301  * @phys_addr: physical address of range to be mapped
4302  *
4303  * Managed pci_remap_iospace().  Map is automatically unmapped on driver
4304  * detach.
4305  */
4306 int devm_pci_remap_iospace(struct device *dev, const struct resource *res,
4307 			   phys_addr_t phys_addr)
4308 {
4309 	const struct resource **ptr;
4310 	int error;
4311 
4312 	ptr = devres_alloc(devm_pci_unmap_iospace, sizeof(*ptr), GFP_KERNEL);
4313 	if (!ptr)
4314 		return -ENOMEM;
4315 
4316 	error = pci_remap_iospace(res, phys_addr);
4317 	if (error) {
4318 		devres_free(ptr);
4319 	} else	{
4320 		*ptr = res;
4321 		devres_add(dev, ptr);
4322 	}
4323 
4324 	return error;
4325 }
4326 EXPORT_SYMBOL(devm_pci_remap_iospace);
4327 
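/*
 * A minimal usage sketch (hypothetical host bridge driver): map a
 * firmware-described I/O window into the fixed PCI_IOBASE area, using
 * pci_pio_to_address() to translate the logical port range registered
 * earlier back to its CPU physical address.
 *
 *	err = devm_pci_remap_iospace(dev, res,
 *				     pci_pio_to_address(res->start));
 */
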
4328 /**
4329  * devm_pci_remap_cfgspace - Managed pci_remap_cfgspace()
4330  * @dev: Generic device to remap IO address for
4331  * @offset: Resource address to map
4332  * @size: Size of map
4333  *
4334  * Managed pci_remap_cfgspace().  Map is automatically unmapped on driver
4335  * detach.
4336  */
4337 void __iomem *devm_pci_remap_cfgspace(struct device *dev,
4338 				      resource_size_t offset,
4339 				      resource_size_t size)
4340 {
4341 	void __iomem **ptr, *addr;
4342 
4343 	ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
4344 	if (!ptr)
4345 		return NULL;
4346 
4347 	addr = pci_remap_cfgspace(offset, size);
4348 	if (addr) {
4349 		*ptr = addr;
4350 		devres_add(dev, ptr);
4351 	} else
4352 		devres_free(ptr);
4353 
4354 	return addr;
4355 }
4356 EXPORT_SYMBOL(devm_pci_remap_cfgspace);
4357 
4358 /**
4359  * devm_pci_remap_cfg_resource - check, request region and ioremap cfg resource
4360  * @dev: generic device to handle the resource for
4361  * @res: configuration space resource to be handled
4362  *
4363  * Checks that a resource is a valid memory region, requests the memory
4364  * region and ioremaps with pci_remap_cfgspace() API that ensures the
4365  * proper PCI configuration space memory attributes are guaranteed.
4366  *
4367  * All operations are managed and will be undone on driver detach.
4368  *
4369  * Returns a pointer to the remapped memory or an ERR_PTR() encoded error code
4370  * on failure. Usage example::
4371  *
4372  *	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
4373  *	base = devm_pci_remap_cfg_resource(&pdev->dev, res);
4374  *	if (IS_ERR(base))
4375  *		return PTR_ERR(base);
4376  */
4377 void __iomem *devm_pci_remap_cfg_resource(struct device *dev,
4378 					  struct resource *res)
4379 {
4380 	resource_size_t size;
4381 	const char *name;
4382 	void __iomem *dest_ptr;
4383 
4384 	BUG_ON(!dev);
4385 
4386 	if (!res || resource_type(res) != IORESOURCE_MEM) {
4387 		dev_err(dev, "invalid resource\n");
4388 		return IOMEM_ERR_PTR(-EINVAL);
4389 	}
4390 
4391 	size = resource_size(res);
4392 
4393 	if (res->name)
4394 		name = devm_kasprintf(dev, GFP_KERNEL, "%s %s", dev_name(dev),
4395 				      res->name);
4396 	else
4397 		name = devm_kstrdup(dev, dev_name(dev), GFP_KERNEL);
4398 	if (!name)
4399 		return IOMEM_ERR_PTR(-ENOMEM);
4400 
4401 	if (!devm_request_mem_region(dev, res->start, size, name)) {
4402 		dev_err(dev, "can't request region for resource %pR\n", res);
4403 		return IOMEM_ERR_PTR(-EBUSY);
4404 	}
4405 
4406 	dest_ptr = devm_pci_remap_cfgspace(dev, res->start, size);
4407 	if (!dest_ptr) {
4408 		dev_err(dev, "ioremap failed for resource %pR\n", res);
4409 		devm_release_mem_region(dev, res->start, size);
4410 		dest_ptr = IOMEM_ERR_PTR(-ENOMEM);
4411 	}
4412 
4413 	return dest_ptr;
4414 }
4415 EXPORT_SYMBOL(devm_pci_remap_cfg_resource);
4416 
4417 static void __pci_set_master(struct pci_dev *dev, bool enable)
4418 {
4419 	u16 old_cmd, cmd;
4420 
4421 	pci_read_config_word(dev, PCI_COMMAND, &old_cmd);
4422 	if (enable)
4423 		cmd = old_cmd | PCI_COMMAND_MASTER;
4424 	else
4425 		cmd = old_cmd & ~PCI_COMMAND_MASTER;
4426 	if (cmd != old_cmd) {
4427 		pci_dbg(dev, "%s bus mastering\n",
4428 			enable ? "enabling" : "disabling");
4429 		pci_write_config_word(dev, PCI_COMMAND, cmd);
4430 	}
4431 	dev->is_busmaster = enable;
4432 }
4433 
4434 /**
4435  * pcibios_setup - process "pci=" kernel boot arguments
4436  * @str: string used to pass in "pci=" kernel boot arguments
4437  *
4438  * Process kernel boot arguments.  This is the default implementation.
4439  * Architecture specific implementations can override this as necessary.
4440  */
4441 char * __weak __init pcibios_setup(char *str)
4442 {
4443 	return str;
4444 }
4445 
4446 /**
4447  * pcibios_set_master - enable PCI bus-mastering for device dev
4448  * @dev: the PCI device to enable
4449  *
4450  * Enables PCI bus-mastering for the device.  This is the default
4451  * implementation.  Architecture specific implementations can override
4452  * this if necessary.
4453  */
4454 void __weak pcibios_set_master(struct pci_dev *dev)
4455 {
4456 	u8 lat;
4457 
4458 	/* The latency timer doesn't apply to PCIe (either Type 0 or Type 1) */
4459 	if (pci_is_pcie(dev))
4460 		return;
4461 
4462 	pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
4463 	if (lat < 16)
4464 		lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency;
4465 	else if (lat > pcibios_max_latency)
4466 		lat = pcibios_max_latency;
4467 	else
4468 		return;
4469 
4470 	pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
4471 }
4472 
4473 /**
4474  * pci_set_master - enables bus-mastering for device dev
4475  * @dev: the PCI device to enable
4476  *
4477  * Enables bus-mastering on the device and calls pcibios_set_master()
4478  * to do the needed arch specific settings.
4479  */
4480 void pci_set_master(struct pci_dev *dev)
4481 {
4482 	__pci_set_master(dev, true);
4483 	pcibios_set_master(dev);
4484 }
4485 EXPORT_SYMBOL(pci_set_master);
4486 
4487 /**
4488  * pci_clear_master - disables bus-mastering for device dev
4489  * @dev: the PCI device to disable
4490  */
4491 void pci_clear_master(struct pci_dev *dev)
4492 {
4493 	__pci_set_master(dev, false);
4494 }
4495 EXPORT_SYMBOL(pci_clear_master);
4496 
4497 /**
4498  * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed
4499  * @dev: the PCI device for which MWI is to be enabled
4500  *
4501  * Helper function for pci_set_mwi.
4502  * Originally copied from drivers/net/acenic.c.
4503  * Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>.
4504  *
4505  * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
4506  */
4507 int pci_set_cacheline_size(struct pci_dev *dev)
4508 {
4509 	u8 cacheline_size;
4510 
4511 	if (!pci_cache_line_size)
4512 		return -EINVAL;
4513 
4514 	/* Validate current setting: the PCI_CACHE_LINE_SIZE must be
4515 	   equal to or a multiple of the right value. */
4516 	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
4517 	if (cacheline_size >= pci_cache_line_size &&
4518 	    (cacheline_size % pci_cache_line_size) == 0)
4519 		return 0;
4520 
4521 	/* Write the correct value. */
4522 	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
4523 	/* Read it back. */
4524 	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
4525 	if (cacheline_size == pci_cache_line_size)
4526 		return 0;
4527 
4528 	pci_dbg(dev, "cache line size of %d is not supported\n",
4529 		   pci_cache_line_size << 2);
4530 
4531 	return -EINVAL;
4532 }
4533 EXPORT_SYMBOL_GPL(pci_set_cacheline_size);
4534 
4535 /**
4536  * pci_set_mwi - enables memory-write-invalidate PCI transaction
4537  * @dev: the PCI device for which MWI is enabled
4538  *
4539  * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
4540  *
4541  * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
4542  */
4543 int pci_set_mwi(struct pci_dev *dev)
4544 {
4545 #ifdef PCI_DISABLE_MWI
4546 	return 0;
4547 #else
4548 	int rc;
4549 	u16 cmd;
4550 
4551 	rc = pci_set_cacheline_size(dev);
4552 	if (rc)
4553 		return rc;
4554 
4555 	pci_read_config_word(dev, PCI_COMMAND, &cmd);
4556 	if (!(cmd & PCI_COMMAND_INVALIDATE)) {
4557 		pci_dbg(dev, "enabling Mem-Wr-Inval\n");
4558 		cmd |= PCI_COMMAND_INVALIDATE;
4559 		pci_write_config_word(dev, PCI_COMMAND, cmd);
4560 	}
4561 	return 0;
4562 #endif
4563 }
4564 EXPORT_SYMBOL(pci_set_mwi);
4565 
4566 /**
4567  * pcim_set_mwi - a device-managed pci_set_mwi()
4568  * @dev: the PCI device for which MWI is enabled
4569  *
4570  * Managed pci_set_mwi().
4571  *
4572  * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
4573  */
4574 int pcim_set_mwi(struct pci_dev *dev)
4575 {
4576 	struct pci_devres *dr;
4577 
4578 	dr = find_pci_dr(dev);
4579 	if (!dr)
4580 		return -ENOMEM;
4581 
4582 	dr->mwi = 1;
4583 	return pci_set_mwi(dev);
4584 }
4585 EXPORT_SYMBOL(pcim_set_mwi);
4586 
4587 /**
4588  * pci_try_set_mwi - enables memory-write-invalidate PCI transaction
4589  * @dev: the PCI device for which MWI is enabled
4590  *
4591  * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
4592  * Callers are not required to check the return value.
4593  *
4594  * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
4595  */
4596 int pci_try_set_mwi(struct pci_dev *dev)
4597 {
4598 #ifdef PCI_DISABLE_MWI
4599 	return 0;
4600 #else
4601 	return pci_set_mwi(dev);
4602 #endif
4603 }
4604 EXPORT_SYMBOL(pci_try_set_mwi);
4605 
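/*
 * A minimal usage sketch (hypothetical driver): MWI is purely an
 * optimization on conventional PCI, so callers usually try it once in
 * probe and ignore the result.
 *
 *	pci_try_set_mwi(pdev);
 */
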
4606 /**
4607  * pci_clear_mwi - disables Memory-Write-Invalidate for device dev
4608  * @dev: the PCI device to disable
4609  *
4610  * Disables PCI Memory-Write-Invalidate transaction on the device
4611  */
4612 void pci_clear_mwi(struct pci_dev *dev)
4613 {
4614 #ifndef PCI_DISABLE_MWI
4615 	u16 cmd;
4616 
4617 	pci_read_config_word(dev, PCI_COMMAND, &cmd);
4618 	if (cmd & PCI_COMMAND_INVALIDATE) {
4619 		cmd &= ~PCI_COMMAND_INVALIDATE;
4620 		pci_write_config_word(dev, PCI_COMMAND, cmd);
4621 	}
4622 #endif
4623 }
4624 EXPORT_SYMBOL(pci_clear_mwi);
4625 
4626 /**
4627  * pci_disable_parity - disable parity checking for device
4628  * @dev: the PCI device to operate on
4629  *
4630  * Disable parity checking for device @dev
4631  */
4632 void pci_disable_parity(struct pci_dev *dev)
4633 {
4634 	u16 cmd;
4635 
4636 	pci_read_config_word(dev, PCI_COMMAND, &cmd);
4637 	if (cmd & PCI_COMMAND_PARITY) {
4638 		cmd &= ~PCI_COMMAND_PARITY;
4639 		pci_write_config_word(dev, PCI_COMMAND, cmd);
4640 	}
4641 }
4642 
4643 /**
4644  * pci_intx - enables/disables PCI INTx for device dev
4645  * @pdev: the PCI device to operate on
4646  * @enable: boolean: whether to enable or disable PCI INTx
4647  *
4648  * Enables/disables PCI INTx for device @pdev
4649  */
4650 void pci_intx(struct pci_dev *pdev, int enable)
4651 {
4652 	u16 pci_command, new;
4653 
4654 	pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
4655 
4656 	if (enable)
4657 		new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
4658 	else
4659 		new = pci_command | PCI_COMMAND_INTX_DISABLE;
4660 
4661 	if (new != pci_command) {
4662 		struct pci_devres *dr;
4663 
4664 		pci_write_config_word(pdev, PCI_COMMAND, new);
4665 
4666 		dr = find_pci_dr(pdev);
4667 		if (dr && !dr->restore_intx) {
4668 			dr->restore_intx = 1;
4669 			dr->orig_intx = !enable;
4670 		}
4671 	}
4672 }
4673 EXPORT_SYMBOL_GPL(pci_intx);
4674 
4675 static bool pci_check_and_set_intx_mask(struct pci_dev *dev, bool mask)
4676 {
4677 	struct pci_bus *bus = dev->bus;
4678 	bool mask_updated = true;
4679 	u32 cmd_status_dword;
4680 	u16 origcmd, newcmd;
4681 	unsigned long flags;
4682 	bool irq_pending;
4683 
4684 	/*
4685 	 * We do a single dword read to retrieve both command and status.
4686 	 * Document assumptions that make this possible.
4687 	 */
4688 	BUILD_BUG_ON(PCI_COMMAND % 4);
4689 	BUILD_BUG_ON(PCI_COMMAND + 2 != PCI_STATUS);
4690 
4691 	raw_spin_lock_irqsave(&pci_lock, flags);
4692 
4693 	bus->ops->read(bus, dev->devfn, PCI_COMMAND, 4, &cmd_status_dword);
4694 
4695 	irq_pending = (cmd_status_dword >> 16) & PCI_STATUS_INTERRUPT;
4696 
4697 	/*
4698 	 * Check interrupt status register to see whether our device
4699 	 * triggered the interrupt (when masking) or the next IRQ is
4700 	 * already pending (when unmasking).
4701 	 */
4702 	if (mask != irq_pending) {
4703 		mask_updated = false;
4704 		goto done;
4705 	}
4706 
4707 	origcmd = cmd_status_dword;
4708 	newcmd = origcmd & ~PCI_COMMAND_INTX_DISABLE;
4709 	if (mask)
4710 		newcmd |= PCI_COMMAND_INTX_DISABLE;
4711 	if (newcmd != origcmd)
4712 		bus->ops->write(bus, dev->devfn, PCI_COMMAND, 2, newcmd);
4713 
4714 done:
4715 	raw_spin_unlock_irqrestore(&pci_lock, flags);
4716 
4717 	return mask_updated;
4718 }
4719 
4720 /**
4721  * pci_check_and_mask_intx - mask INTx on pending interrupt
4722  * @dev: the PCI device to operate on
4723  *
4724  * Check if the device dev has its INTx line asserted, mask it and return
4725  * true in that case. False is returned if no interrupt was pending.
4726  */
4727 bool pci_check_and_mask_intx(struct pci_dev *dev)
4728 {
4729 	return pci_check_and_set_intx_mask(dev, true);
4730 }
4731 EXPORT_SYMBOL_GPL(pci_check_and_mask_intx);
4732 
4733 /**
4734  * pci_check_and_unmask_intx - unmask INTx if no interrupt is pending
4735  * @dev: the PCI device to operate on
4736  *
4737  * Check if the device dev has its INTx line asserted, unmask it if not and
4738  * return true. False is returned and the mask remains active if there was
4739  * still an interrupt pending.
4740  */
4741 bool pci_check_and_unmask_intx(struct pci_dev *dev)
4742 {
4743 	return pci_check_and_set_intx_mask(dev, false);
4744 }
4745 EXPORT_SYMBOL_GPL(pci_check_and_unmask_intx);
4746 
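/*
 * A minimal handler sketch (hypothetical driver, non-MSI shared INTx;
 * "foo_priv" and the work item are assumptions): mask the line only
 * when this device asserted it, defer the real work, and unmask later
 * with pci_check_and_unmask_intx().
 *
 *	static irqreturn_t foo_irq(int irq, void *data)
 *	{
 *		struct foo_priv *priv = data;
 *
 *		if (!pci_check_and_mask_intx(priv->pdev))
 *			return IRQ_NONE;
 *
 *		schedule_work(&priv->work);
 *		return IRQ_HANDLED;
 *	}
 */
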
4747 /**
4748  * pci_wait_for_pending_transaction - wait for pending transaction
4749  * @dev: the PCI device to operate on
4750  *
4751  * Return 0 if a transaction is pending, 1 otherwise.
4752  */
4753 int pci_wait_for_pending_transaction(struct pci_dev *dev)
4754 {
4755 	if (!pci_is_pcie(dev))
4756 		return 1;
4757 
4758 	return pci_wait_for_pending(dev, pci_pcie_cap(dev) + PCI_EXP_DEVSTA,
4759 				    PCI_EXP_DEVSTA_TRPND);
4760 }
4761 EXPORT_SYMBOL(pci_wait_for_pending_transaction);
4762 
4763 /**
4764  * pcie_flr - initiate a PCIe function level reset
4765  * @dev: device to reset
4766  *
4767  * Initiate a function level reset unconditionally on @dev without
4768  * checking any flags or the Device Capabilities (DEVCAP) register.
4769  */
4770 int pcie_flr(struct pci_dev *dev)
4771 {
4772 	if (!pci_wait_for_pending_transaction(dev))
4773 		pci_err(dev, "timed out waiting for pending transaction; performing function level reset anyway\n");
4774 
4775 	pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
4776 
4777 	if (dev->imm_ready)
4778 		return 0;
4779 
4780 	/*
4781 	 * Per PCIe r4.0, sec 6.6.2, a device must complete an FLR within
4782 	 * 100ms, but may silently discard requests while the FLR is in
4783 	 * progress.  Wait 100ms before trying to access the device.
4784 	 */
4785 	msleep(100);
4786 
4787 	return pci_dev_wait(dev, "FLR", PCIE_RESET_READY_POLL_MS);
4788 }
4789 EXPORT_SYMBOL_GPL(pcie_flr);
4790 
4791 /**
4792  * pcie_reset_flr - initiate a PCIe function level reset
4793  * @dev: device to reset
4794  * @probe: if true, return 0 if device can be reset this way
4795  *
4796  * Initiate a function level reset on @dev.
4797  */
4798 int pcie_reset_flr(struct pci_dev *dev, bool probe)
4799 {
4800 	if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET)
4801 		return -ENOTTY;
4802 
4803 	if (!(dev->devcap & PCI_EXP_DEVCAP_FLR))
4804 		return -ENOTTY;
4805 
4806 	if (probe)
4807 		return 0;
4808 
4809 	return pcie_flr(dev);
4810 }
4811 EXPORT_SYMBOL_GPL(pcie_reset_flr);
4812 
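/*
 * A minimal usage sketch (hypothetical caller): probe first, then
 * perform the reset.  Most drivers should instead use
 * pci_reset_function(), which selects a method and handles locking and
 * device state save/restore.
 *
 *	if (pcie_reset_flr(pdev, true) == 0)
 *		err = pcie_reset_flr(pdev, false);
 */
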
4813 static int pci_af_flr(struct pci_dev *dev, bool probe)
4814 {
4815 	int pos;
4816 	u8 cap;
4817 
4818 	pos = pci_find_capability(dev, PCI_CAP_ID_AF);
4819 	if (!pos)
4820 		return -ENOTTY;
4821 
4822 	if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET)
4823 		return -ENOTTY;
4824 
4825 	pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap);
4826 	if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
4827 		return -ENOTTY;
4828 
4829 	if (probe)
4830 		return 0;
4831 
4832 	/*
4833 	 * Wait for Transaction Pending bit to clear.  A word-aligned test
4834 	 * is used, so we use the control offset rather than status and shift
4835 	 * the test bit to match.
4836 	 */
4837 	if (!pci_wait_for_pending(dev, pos + PCI_AF_CTRL,
4838 				 PCI_AF_STATUS_TP << 8))
4839 		pci_err(dev, "timed out waiting for pending transaction; performing AF function level reset anyway\n");
4840 
4841 	pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
4842 
4843 	if (dev->imm_ready)
4844 		return 0;
4845 
4846 	/*
4847 	 * Per Advanced Capabilities for Conventional PCI ECN, 13 April 2006,
4848 	 * updated 27 July 2006, a device must complete an FLR within
4849 	 * 100ms, but may silently discard requests while the FLR is in
4850 	 * progress.  Wait 100ms before trying to access the device.
4851 	 */
4852 	msleep(100);
4853 
4854 	return pci_dev_wait(dev, "AF_FLR", PCIE_RESET_READY_POLL_MS);
4855 }
4856 
4857 /**
4858  * pci_pm_reset - Put device into PCI_D3 and back into PCI_D0.
4859  * @dev: Device to reset.
4860  * @probe: if true, return 0 if the device can be reset this way.
4861  *
4862  * If @dev supports native PCI PM and its PCI_PM_CTRL_NO_SOFT_RESET flag is
4863  * unset, it will be reinitialized internally when going from PCI_D3hot to
4864  * PCI_D0.  If that's the case and the device is not in a low-power state
4865  * already, force it into PCI_D3hot and back to PCI_D0, causing it to be reset.
4866  *
4867  * NOTE: This causes the caller to sleep for twice the device power transition
4868  * cooldown period, which for the D0->D3hot and D3hot->D0 transitions is 10 ms
4869  * by default (i.e. unless the @dev's d3hot_delay field has a different value).
4870  * Moreover, only devices in D0 can be reset by this function.
4871  */
4872 static int pci_pm_reset(struct pci_dev *dev, bool probe)
4873 {
4874 	u16 csr;
4875 
4876 	if (!dev->pm_cap || dev->dev_flags & PCI_DEV_FLAGS_NO_PM_RESET)
4877 		return -ENOTTY;
4878 
4879 	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr);
4880 	if (csr & PCI_PM_CTRL_NO_SOFT_RESET)
4881 		return -ENOTTY;
4882 
4883 	if (probe)
4884 		return 0;
4885 
4886 	if (dev->current_state != PCI_D0)
4887 		return -EINVAL;
4888 
4889 	csr &= ~PCI_PM_CTRL_STATE_MASK;
4890 	csr |= PCI_D3hot;
4891 	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
4892 	pci_dev_d3_sleep(dev);
4893 
4894 	csr &= ~PCI_PM_CTRL_STATE_MASK;
4895 	csr |= PCI_D0;
4896 	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
4897 	pci_dev_d3_sleep(dev);
4898 
4899 	return pci_dev_wait(dev, "PM D3hot->D0", PCIE_RESET_READY_POLL_MS);
4900 }
4901 
4902 /**
4903  * pcie_wait_for_link_status - Wait for link status change
4904  * @pdev: Device whose link to wait for.
4905  * @use_lt: Use the LT bit if TRUE, or the DLLLA bit if FALSE.
4906  * @active: Waiting for active or inactive?
4907  *
4908  * Return 0 if successful, or -ETIMEDOUT if status has not changed within
4909  * PCIE_LINK_RETRAIN_TIMEOUT_MS milliseconds.
4910  */
4911 static int pcie_wait_for_link_status(struct pci_dev *pdev,
4912 				     bool use_lt, bool active)
4913 {
4914 	u16 lnksta_mask, lnksta_match;
4915 	unsigned long end_jiffies;
4916 	u16 lnksta;
4917 
4918 	lnksta_mask = use_lt ? PCI_EXP_LNKSTA_LT : PCI_EXP_LNKSTA_DLLLA;
4919 	lnksta_match = active ? lnksta_mask : 0;
4920 
4921 	end_jiffies = jiffies + msecs_to_jiffies(PCIE_LINK_RETRAIN_TIMEOUT_MS);
4922 	do {
4923 		pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnksta);
4924 		if ((lnksta & lnksta_mask) == lnksta_match)
4925 			return 0;
4926 		msleep(1);
4927 	} while (time_before(jiffies, end_jiffies));
4928 
4929 	return -ETIMEDOUT;
4930 }
4931 
4932 /**
4933  * pcie_retrain_link - Request a link retrain and wait for it to complete
4934  * @pdev: Device whose link to retrain.
4935  * @use_lt: Use the LT bit if TRUE, or the DLLLA bit if FALSE, for status.
4936  *
4937  * Retrain completion status is retrieved from the Link Status Register
4938  * according to @use_lt.  It is not verified whether the use of the DLLLA
4939  * bit is valid.
4940  *
4941  * Return 0 if successful, or -ETIMEDOUT if training has not completed
4942  * within PCIE_LINK_RETRAIN_TIMEOUT_MS milliseconds.
4943  */
4944 int pcie_retrain_link(struct pci_dev *pdev, bool use_lt)
4945 {
4946 	int rc;
4947 
4948 	/*
4949 	 * Ensure the updated LNKCTL parameters are used during link
4950 	 * training by checking that there is no ongoing link training to
4951 	 * avoid LTSSM race as recommended in Implementation Note at the
4952 	 * end of PCIe r6.0.1 sec 7.5.3.7.
4953 	 */
4954 	rc = pcie_wait_for_link_status(pdev, use_lt, !use_lt);
4955 	if (rc)
4956 		return rc;
4957 
4958 	pcie_capability_set_word(pdev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_RL);
4959 	if (pdev->clear_retrain_link) {
4960 		/*
4961 		 * Due to an erratum in some devices the Retrain Link bit
4962 		 * needs to be cleared again manually to allow the link
4963 		 * training to succeed.
4964 		 */
4965 		pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_RL);
4966 	}
4967 
4968 	return pcie_wait_for_link_status(pdev, use_lt, !use_lt);
4969 }
4970 
4971 /**
4972  * pcie_wait_for_link_delay - Wait until link is active or inactive
4973  * @pdev: Bridge device
4974  * @active: waiting for active or inactive?
4975  * @delay: Delay to wait after link has become active (in ms)
4976  *
4977  * Use this to wait until the link becomes active or inactive.
4978  */
4979 static bool pcie_wait_for_link_delay(struct pci_dev *pdev, bool active,
4980 				     int delay)
4981 {
4982 	int rc;
4983 
4984 	/*
4985 	 * Some controllers might not implement link active reporting. In this
4986 	 * case, we wait for 1000 ms + any delay requested by the caller.
4987 	 */
4988 	if (!pdev->link_active_reporting) {
4989 		msleep(PCIE_LINK_RETRAIN_TIMEOUT_MS + delay);
4990 		return true;
4991 	}
4992 
4993 	/*
4994 	 * Per PCIe r4.0 sec 6.6.1, a component must enter LTSSM Detect within
4995 	 * 20ms, after which we should expect the link to become active if the
4996 	 * reset was successful.  If so, software must wait a minimum of 100ms
4997 	 * before sending configuration requests to devices downstream of this port.
4998 	 *
4999 	 * If the link fails to activate, either the device was physically
5000 	 * removed or the link is permanently failed.
5001 	 */
5002 	if (active)
5003 		msleep(20);
5004 	rc = pcie_wait_for_link_status(pdev, false, active);
5005 	if (active) {
5006 		if (rc)
5007 			rc = pcie_failed_link_retrain(pdev);
5008 		if (rc)
5009 			return false;
5010 
5011 		msleep(delay);
5012 		return true;
5013 	}
5014 
5015 	if (rc)
5016 		return false;
5017 
5018 	return true;
5019 }
5020 
5021 /**
5022  * pcie_wait_for_link - Wait until link is active or inactive
5023  * @pdev: Bridge device
5024  * @active: waiting for active or inactive?
5025  *
5026  * Use this to wait until the link becomes active or inactive.
5027  */
5028 bool pcie_wait_for_link(struct pci_dev *pdev, bool active)
5029 {
5030 	return pcie_wait_for_link_delay(pdev, active, 100);
5031 }
5032 
5033 /*
5034  * Find maximum D3cold delay required by all the devices on the bus.  The
5035  * spec says 100 ms, but firmware can lower it and we allow drivers to
5036  * increase it as well.
5037  *
5038  * Called with @pci_bus_sem locked for reading.
5039  */
5040 static int pci_bus_max_d3cold_delay(const struct pci_bus *bus)
5041 {
5042 	const struct pci_dev *pdev;
5043 	int min_delay = 100;
5044 	int max_delay = 0;
5045 
5046 	list_for_each_entry(pdev, &bus->devices, bus_list) {
5047 		if (pdev->d3cold_delay < min_delay)
5048 			min_delay = pdev->d3cold_delay;
5049 		if (pdev->d3cold_delay > max_delay)
5050 			max_delay = pdev->d3cold_delay;
5051 	}
5052 
5053 	return max(min_delay, max_delay);
5054 }
5055 
5056 /**
5057  * pci_bridge_wait_for_secondary_bus - Wait for secondary bus to be accessible
5058  * @dev: PCI bridge
5059  * @reset_type: reset type in human-readable form
5060  *
5061  * Handle necessary delays before access to the devices on the secondary
5062  * side of the bridge are permitted after D3cold to D0 transition
5063  * or Conventional Reset.
5064  *
5065  * For PCIe this means the delays in PCIe 5.0 section 6.6.1. For
5066  * conventional PCI it means Tpvrh + Trhfa specified in PCI 3.0 section
5067  * 4.3.2.
5068  *
5069  * Return 0 on success or -ENOTTY if the first device on the secondary bus
5070  * failed to become accessible.
5071  */
5072 int pci_bridge_wait_for_secondary_bus(struct pci_dev *dev, char *reset_type)
5073 {
5074 	struct pci_dev *child;
5075 	int delay;
5076 
5077 	if (pci_dev_is_disconnected(dev))
5078 		return 0;
5079 
5080 	if (!pci_is_bridge(dev))
5081 		return 0;
5082 
5083 	down_read(&pci_bus_sem);
5084 
5085 	/*
5086 	 * We only deal with devices that are currently present on the bus.
5087 	 * For any hot-added devices the access delay is handled in pciehp
5088 	 * board_added().  In case of ACPI hotplug the firmware is expected
5089 	 * to configure the devices before the OS is notified.
5090 	 */
5091 	if (!dev->subordinate || list_empty(&dev->subordinate->devices)) {
5092 		up_read(&pci_bus_sem);
5093 		return 0;
5094 	}
5095 
5096 	/* Take d3cold_delay requirements into account */
5097 	delay = pci_bus_max_d3cold_delay(dev->subordinate);
5098 	if (!delay) {
5099 		up_read(&pci_bus_sem);
5100 		return 0;
5101 	}
5102 
5103 	child = list_first_entry(&dev->subordinate->devices, struct pci_dev,
5104 				 bus_list);
5105 	up_read(&pci_bus_sem);
5106 
5107 	/*
5108 	 * For conventional PCI and PCI-X, we need to wait Tpvrh + Trhfa
5109 	 * before accessing the device after reset (that is, 1000 ms + 100 ms).
5110 	 */
5111 	if (!pci_is_pcie(dev)) {
5112 		pci_dbg(dev, "waiting %d ms for secondary bus\n", 1000 + delay);
5113 		msleep(1000 + delay);
5114 		return 0;
5115 	}
5116 
5117 	/*
5118 	 * For PCIe downstream and root ports that do not support speeds
5119 	 * greater than 5 GT/s, we need to wait a minimum of 100 ms.  For
5120 	 * higher speeds (gen3) we must first wait for the data link layer
5121 	 * to become active.
5122 	 *
5123 	 * However, 100 ms is the minimum and the PCIe spec says the
5124 	 * software must allow at least 1s before it can determine that the
5125 	 * device that did not respond is a broken device.  Also, the device
5126 	 * can take longer than that to respond if it indicates so through
5127 	 * Request Retry Status completions.
5128 	 *
5129 	 * Therefore we wait for 100 ms and check for the device presence
5130 	 * until the timeout expires.
5131 	 */
5132 	if (!pcie_downstream_port(dev))
5133 		return 0;
5134 
5135 	if (pcie_get_speed_cap(dev) <= PCIE_SPEED_5_0GT) {
5136 		u16 status;
5137 
5138 		pci_dbg(dev, "waiting %d ms for downstream link\n", delay);
5139 		msleep(delay);
5140 
5141 		if (!pci_dev_wait(child, reset_type, PCI_RESET_WAIT - delay))
5142 			return 0;
5143 
5144 		/*
5145 		 * If the port supports active link reporting, we now check
5146 		 * whether the link is active and, if not, bail out early with
5147 		 * the assumption that the device is no longer present.
5148 		 */
5149 		if (!dev->link_active_reporting)
5150 			return -ENOTTY;
5151 
5152 		pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &status);
5153 		if (!(status & PCI_EXP_LNKSTA_DLLLA))
5154 			return -ENOTTY;
5155 
5156 		return pci_dev_wait(child, reset_type,
5157 				    PCIE_RESET_READY_POLL_MS - PCI_RESET_WAIT);
5158 	}
5159 
5160 	pci_dbg(dev, "waiting %d ms for downstream link, after activation\n",
5161 		delay);
5162 	if (!pcie_wait_for_link_delay(dev, true, delay)) {
5163 		/* Did not train, no need to wait any further */
5164 		pci_info(dev, "Data Link Layer Link Active not set in 1000 msec\n");
5165 		return -ENOTTY;
5166 	}
5167 
5168 	return pci_dev_wait(child, reset_type,
5169 			    PCIE_RESET_READY_POLL_MS - delay);
5170 }
5171 
5172 void pci_reset_secondary_bus(struct pci_dev *dev)
5173 {
5174 	u16 ctrl;
5175 
5176 	pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &ctrl);
5177 	ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
5178 	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
5179 
5180 	/*
5181 	 * PCI spec v3.0 7.6.4.2 requires minimum Trst of 1ms.  Double
5182 	 * this to 2ms to ensure that we meet the minimum requirement.
5183 	 */
5184 	msleep(2);
5185 
5186 	ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
5187 	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
5188 }
5189 
5190 void __weak pcibios_reset_secondary_bus(struct pci_dev *dev)
5191 {
5192 	pci_reset_secondary_bus(dev);
5193 }
5194 
5195 /**
5196  * pci_bridge_secondary_bus_reset - Reset the secondary bus on a PCI bridge.
5197  * @dev: Bridge device
5198  *
5199  * Use the bridge control register to assert reset on the secondary bus.
5200  * Devices on the secondary bus are left in power-on state.
5201  */
5202 int pci_bridge_secondary_bus_reset(struct pci_dev *dev)
5203 {
5204 	pcibios_reset_secondary_bus(dev);
5205 
5206 	return pci_bridge_wait_for_secondary_bus(dev, "bus reset");
5207 }
5208 EXPORT_SYMBOL_GPL(pci_bridge_secondary_bus_reset);
5209 
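/*
 * A minimal usage sketch (hypothetical caller): reset everything below
 * a device's upstream bridge.  Note this resets every device on that
 * secondary bus, so callers normally use pci_reset_bus() instead, which
 * also handles locking and device state.
 *
 *	bridge = pci_upstream_bridge(pdev);
 *	if (bridge)
 *		err = pci_bridge_secondary_bus_reset(bridge);
 */
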
5210 static int pci_parent_bus_reset(struct pci_dev *dev, bool probe)
5211 {
5212 	struct pci_dev *pdev;
5213 
5214 	if (pci_is_root_bus(dev->bus) || dev->subordinate ||
5215 	    !dev->bus->self || dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
5216 		return -ENOTTY;
5217 
5218 	list_for_each_entry(pdev, &dev->bus->devices, bus_list)
5219 		if (pdev != dev)
5220 			return -ENOTTY;
5221 
5222 	if (probe)
5223 		return 0;
5224 
5225 	return pci_bridge_secondary_bus_reset(dev->bus->self);
5226 }
5227 
5228 static int pci_reset_hotplug_slot(struct hotplug_slot *hotplug, bool probe)
5229 {
5230 	int rc = -ENOTTY;
5231 
5232 	if (!hotplug || !try_module_get(hotplug->owner))
5233 		return rc;
5234 
5235 	if (hotplug->ops->reset_slot)
5236 		rc = hotplug->ops->reset_slot(hotplug, probe);
5237 
5238 	module_put(hotplug->owner);
5239 
5240 	return rc;
5241 }
5242 
5243 static int pci_dev_reset_slot_function(struct pci_dev *dev, bool probe)
5244 {
5245 	if (dev->multifunction || dev->subordinate || !dev->slot ||
5246 	    dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
5247 		return -ENOTTY;
5248 
5249 	return pci_reset_hotplug_slot(dev->slot->hotplug, probe);
5250 }
5251 
5252 static int pci_reset_bus_function(struct pci_dev *dev, bool probe)
5253 {
5254 	int rc;
5255 
5256 	rc = pci_dev_reset_slot_function(dev, probe);
5257 	if (rc != -ENOTTY)
5258 		return rc;
5259 	return pci_parent_bus_reset(dev, probe);
5260 }
5261 
5262 void pci_dev_lock(struct pci_dev *dev)
5263 {
5264 	/* block PM suspend, driver probe, etc. */
5265 	device_lock(&dev->dev);
5266 	pci_cfg_access_lock(dev);
5267 }
5268 EXPORT_SYMBOL_GPL(pci_dev_lock);
5269 
5270 /* Return 1 on successful lock, 0 on contention */
5271 int pci_dev_trylock(struct pci_dev *dev)
5272 {
5273 	if (device_trylock(&dev->dev)) {
5274 		if (pci_cfg_access_trylock(dev))
5275 			return 1;
5276 		device_unlock(&dev->dev);
5277 	}
5278 
5279 	return 0;
5280 }
5281 EXPORT_SYMBOL_GPL(pci_dev_trylock);
5282 
5283 void pci_dev_unlock(struct pci_dev *dev)
5284 {
5285 	pci_cfg_access_unlock(dev);
5286 	device_unlock(&dev->dev);
5287 }
5288 EXPORT_SYMBOL_GPL(pci_dev_unlock);
5289 
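/*
 * A minimal usage sketch (hypothetical caller; foo_do_work() is an
 * assumption) for contexts that must not block and can retry later:
 *
 *	if (!pci_dev_trylock(pdev))
 *		return -EAGAIN;
 *	foo_do_work(pdev);
 *	pci_dev_unlock(pdev);
 */
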
5290 static void pci_dev_save_and_disable(struct pci_dev *dev)
5291 {
5292 	const struct pci_error_handlers *err_handler =
5293 			dev->driver ? dev->driver->err_handler : NULL;
5294 
5295 	/*
5296 	 * dev->driver->err_handler->reset_prepare() is protected against
5297 	 * races with ->remove() by the device lock, which must be held by
5298 	 * the caller.
5299 	 */
5300 	if (err_handler && err_handler->reset_prepare)
5301 		err_handler->reset_prepare(dev);
5302 
5303 	/*
5304 	 * Wake-up device prior to save.  PM registers default to D0 after
5305 	 * reset and a simple register restore doesn't reliably return
5306 	 * to a non-D0 state anyway.
5307 	 */
5308 	pci_set_power_state(dev, PCI_D0);
5309 
5310 	pci_save_state(dev);
5311 	/*
5312 	 * Disable the device by clearing the Command register, except for
5313 	 * INTx-disable which is set.  This not only disables MMIO and I/O port
5314 	 * BARs, but also prevents the device from being Bus Master, preventing
5315 	 * DMA from the device including MSI/MSI-X interrupts.  For PCI 2.3
5316 	 * compliant devices, INTx-disable prevents legacy interrupts.
5317 	 */
5318 	pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
5319 }
5320 
5321 static void pci_dev_restore(struct pci_dev *dev)
5322 {
5323 	const struct pci_error_handlers *err_handler =
5324 			dev->driver ? dev->driver->err_handler : NULL;
5325 
5326 	pci_restore_state(dev);
5327 
5328 	/*
5329 	 * dev->driver->err_handler->reset_done() is protected against
5330 	 * races with ->remove() by the device lock, which must be held by
5331 	 * the caller.
5332 	 */
5333 	if (err_handler && err_handler->reset_done)
5334 		err_handler->reset_done(dev);
5335 }
5336 
5337 /* dev->reset_methods[] is a 0-terminated list of indices into this array */
5338 static const struct pci_reset_fn_method pci_reset_fn_methods[] = {
5339 	{ },
5340 	{ pci_dev_specific_reset, .name = "device_specific" },
5341 	{ pci_dev_acpi_reset, .name = "acpi" },
5342 	{ pcie_reset_flr, .name = "flr" },
5343 	{ pci_af_flr, .name = "af_flr" },
5344 	{ pci_pm_reset, .name = "pm" },
5345 	{ pci_reset_bus_function, .name = "bus" },
5346 };
5347 
5348 static ssize_t reset_method_show(struct device *dev,
5349 				 struct device_attribute *attr, char *buf)
5350 {
5351 	struct pci_dev *pdev = to_pci_dev(dev);
5352 	ssize_t len = 0;
5353 	int i, m;
5354 
5355 	for (i = 0; i < PCI_NUM_RESET_METHODS; i++) {
5356 		m = pdev->reset_methods[i];
5357 		if (!m)
5358 			break;
5359 
5360 		len += sysfs_emit_at(buf, len, "%s%s", len ? " " : "",
5361 				     pci_reset_fn_methods[m].name);
5362 	}
5363 
5364 	if (len)
5365 		len += sysfs_emit_at(buf, len, "\n");
5366 
5367 	return len;
5368 }
5369 
5370 static int reset_method_lookup(const char *name)
5371 {
5372 	int m;
5373 
5374 	for (m = 1; m < PCI_NUM_RESET_METHODS; m++) {
5375 		if (sysfs_streq(name, pci_reset_fn_methods[m].name))
5376 			return m;
5377 	}
5378 
5379 	return 0;	/* not found */
5380 }
5381 
5382 static ssize_t reset_method_store(struct device *dev,
5383 				  struct device_attribute *attr,
5384 				  const char *buf, size_t count)
5385 {
5386 	struct pci_dev *pdev = to_pci_dev(dev);
5387 	char *options, *name;
5388 	int m, n;
5389 	u8 reset_methods[PCI_NUM_RESET_METHODS] = { 0 };
5390 
5391 	if (sysfs_streq(buf, "")) {
5392 		pdev->reset_methods[0] = 0;
5393 		pci_warn(pdev, "All device reset methods disabled by user\n");
5394 		return count;
5395 	}
5396 
5397 	if (sysfs_streq(buf, "default")) {
5398 		pci_init_reset_methods(pdev);
5399 		return count;
5400 	}
5401 
5402 	options = kstrndup(buf, count, GFP_KERNEL);
5403 	if (!options)
5404 		return -ENOMEM;
5405 
5406 	n = 0;
5407 	while ((name = strsep(&options, " ")) != NULL) {
5408 		if (sysfs_streq(name, ""))
5409 			continue;
5410 
5411 		name = strim(name);
5412 
5413 		m = reset_method_lookup(name);
5414 		if (!m) {
5415 			pci_err(pdev, "Invalid reset method '%s'\n", name);
5416 			goto error;
5417 		}
5418 
5419 		if (pci_reset_fn_methods[m].reset_fn(pdev, PCI_RESET_PROBE)) {
5420 			pci_err(pdev, "Unsupported reset method '%s'\n", name);
5421 			goto error;
5422 		}
5423 
5424 		if (n == PCI_NUM_RESET_METHODS - 1) {
5425 			pci_err(pdev, "Too many reset methods\n");
5426 			goto error;
5427 		}
5428 
5429 		reset_methods[n++] = m;
5430 	}
5431 
5432 	reset_methods[n] = 0;
5433 
5434 	/* Warn if dev-specific supported but not highest priority */
5435 	if (pci_reset_fn_methods[1].reset_fn(pdev, PCI_RESET_PROBE) == 0 &&
5436 	    reset_methods[0] != 1)
5437 		pci_warn(pdev, "Device-specific reset disabled/de-prioritized by user\n");
5438 	memcpy(pdev->reset_methods, reset_methods, sizeof(pdev->reset_methods));
5439 	kfree(options);
5440 	return count;
5441 
5442 error:
5443 	/* Leave previous methods unchanged */
5444 	kfree(options);
5445 	return -EINVAL;
5446 }
5447 static DEVICE_ATTR_RW(reset_method);
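
/*
 * Illustrative sysfs usage (the device address is a placeholder, not part
 * of this file): the methods tried by __pci_reset_function_locked() can be
 * listed, restricted, or restored to the probed defaults:
 *
 *	# cat /sys/bus/pci/devices/0000:01:00.0/reset_method
 *	flr bus
 *	# echo "bus" > /sys/bus/pci/devices/0000:01:00.0/reset_method
 *	# echo "default" > /sys/bus/pci/devices/0000:01:00.0/reset_method
 */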
5448 
5449 static struct attribute *pci_dev_reset_method_attrs[] = {
5450 	&dev_attr_reset_method.attr,
5451 	NULL,
5452 };
5453 
5454 static umode_t pci_dev_reset_method_attr_is_visible(struct kobject *kobj,
5455 						    struct attribute *a, int n)
5456 {
5457 	struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
5458 
5459 	if (!pci_reset_supported(pdev))
5460 		return 0;
5461 
5462 	return a->mode;
5463 }
5464 
5465 const struct attribute_group pci_dev_reset_method_attr_group = {
5466 	.attrs = pci_dev_reset_method_attrs,
5467 	.is_visible = pci_dev_reset_method_attr_is_visible,
5468 };
5469 
5470 /**
5471  * __pci_reset_function_locked - reset a PCI device function while holding
5472  * the @dev mutex lock.
5473  * @dev: PCI device to reset
5474  *
5475  * Some devices allow an individual function to be reset without affecting
5476  * other functions in the same device.  The PCI device must be responsive
5477  * to PCI config space in order to use this function.
5478  *
5479  * The device function is presumed to be unused and the caller is holding
5480  * the device mutex lock when this function is called.
5481  *
5482  * Resetting the device will make the contents of PCI configuration space
5483  * random, so any caller of this must be prepared to reinitialise the
5484  * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
5485  * etc.
5486  *
5487  * Returns 0 if the device function was successfully reset or negative if the
5488  * device doesn't support resetting a single function.
5489  */
5490 int __pci_reset_function_locked(struct pci_dev *dev)
5491 {
5492 	int i, m, rc;
5493 
5494 	might_sleep();
5495 
5496 	/*
5497 	 * A reset method returns -ENOTTY if it doesn't support this device and
5498 	 * we should try the next method.
5499 	 *
5500 	 * If it returns 0 (success), we're finished.  If it returns any other
5501 	 * error, we're also finished: this indicates that further reset
5502 	 * mechanisms might be broken on the device.
5503 	 */
5504 	for (i = 0; i < PCI_NUM_RESET_METHODS; i++) {
5505 		m = dev->reset_methods[i];
5506 		if (!m)
5507 			return -ENOTTY;
5508 
5509 		rc = pci_reset_fn_methods[m].reset_fn(dev, PCI_RESET_DO_RESET);
5510 		if (!rc)
5511 			return 0;
5512 		if (rc != -ENOTTY)
5513 			return rc;
5514 	}
5515 
5516 	return -ENOTTY;
5517 }
5518 EXPORT_SYMBOL_GPL(__pci_reset_function_locked);
5519 
5520 /**
5521  * pci_init_reset_methods - check whether device can be safely reset
5522  * and store supported reset mechanisms.
5523  * @dev: PCI device to check for reset mechanisms
5524  *
5525  * Some devices allow an individual function to be reset without affecting
5526  * other functions in the same device.  The PCI device must be in D0-D3hot
5527  * state.
5528  *
5529  * Stores reset mechanisms supported by device in reset_methods byte array
5530  * which is a member of struct pci_dev.
5531  */
5532 void pci_init_reset_methods(struct pci_dev *dev)
5533 {
5534 	int m, i, rc;
5535 
5536 	BUILD_BUG_ON(ARRAY_SIZE(pci_reset_fn_methods) != PCI_NUM_RESET_METHODS);
5537 
5538 	might_sleep();
5539 
5540 	i = 0;
5541 	for (m = 1; m < PCI_NUM_RESET_METHODS; m++) {
5542 		rc = pci_reset_fn_methods[m].reset_fn(dev, PCI_RESET_PROBE);
5543 		if (!rc)
5544 			dev->reset_methods[i++] = m;
5545 		else if (rc != -ENOTTY)
5546 			break;
5547 	}
5548 
5549 	dev->reset_methods[i] = 0;
5550 }
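
/*
 * Example: for a device that supports a device-specific reset, FLR, and a
 * secondary bus reset, the probe loop above leaves reset_methods[] as
 * { 1, 3, 6, 0 }, i.e. "device_specific", "flr", "bus" in priority order
 * (indices into pci_reset_fn_methods[]).
 */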
5551 
5552 /**
5553  * pci_reset_function - quiesce and reset a PCI device function
5554  * @dev: PCI device to reset
5555  *
5556  * Some devices allow an individual function to be reset without affecting
5557  * other functions in the same device.  The PCI device must be responsive
5558  * to PCI config space in order to use this function.
5559  *
5560  * This function does not just reset the PCI portion of a device, but
5561  * clears all the state associated with the device.  This function differs
5562  * from __pci_reset_function_locked() in that it saves and restores device state
5563  * over the reset and takes the PCI device lock.
5564  *
5565  * Returns 0 if the device function was successfully reset or negative if the
5566  * device doesn't support resetting a single function.
5567  */
5568 int pci_reset_function(struct pci_dev *dev)
5569 {
5570 	int rc;
5571 
5572 	if (!pci_reset_supported(dev))
5573 		return -ENOTTY;
5574 
5575 	pci_dev_lock(dev);
5576 	pci_dev_save_and_disable(dev);
5577 
5578 	rc = __pci_reset_function_locked(dev);
5579 
5580 	pci_dev_restore(dev);
5581 	pci_dev_unlock(dev);
5582 
5583 	return rc;
5584 }
5585 EXPORT_SYMBOL_GPL(pci_reset_function);
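
/*
 * Sketch of a typical caller (error-recovery path of a hypothetical
 * driver; "pdev" is assumed to be its bound PCI device):
 *
 *	if (pci_reset_supported(pdev)) {
 *		int rc = pci_reset_function(pdev);
 *
 *		if (rc)
 *			pci_err(pdev, "function reset failed: %d\n", rc);
 *	}
 */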
5586 
5587 /**
5588  * pci_reset_function_locked - quiesce and reset a PCI device function
5589  * @dev: PCI device to reset
5590  *
5591  * Some devices allow an individual function to be reset without affecting
5592  * other functions in the same device.  The PCI device must be responsive
5593  * to PCI config space in order to use this function.
5594  *
5595  * This function does not just reset the PCI portion of a device, but
5596  * clears all the state associated with the device.  This function differs
5597  * from __pci_reset_function_locked() in that it saves and restores device state
5598  * over the reset.  It also differs from pci_reset_function() in that it
5599  * requires the PCI device lock to be held.
5600  *
5601  * Returns 0 if the device function was successfully reset or negative if the
5602  * device doesn't support resetting a single function.
5603  */
5604 int pci_reset_function_locked(struct pci_dev *dev)
5605 {
5606 	int rc;
5607 
5608 	if (!pci_reset_supported(dev))
5609 		return -ENOTTY;
5610 
5611 	pci_dev_save_and_disable(dev);
5612 
5613 	rc = __pci_reset_function_locked(dev);
5614 
5615 	pci_dev_restore(dev);
5616 
5617 	return rc;
5618 }
5619 EXPORT_SYMBOL_GPL(pci_reset_function_locked);
5620 
5621 /**
5622  * pci_try_reset_function - quiesce and reset a PCI device function
5623  * @dev: PCI device to reset
5624  *
5625  * Same as above, except return -EAGAIN if unable to lock device.
5626  */
5627 int pci_try_reset_function(struct pci_dev *dev)
5628 {
5629 	int rc;
5630 
5631 	if (!pci_reset_supported(dev))
5632 		return -ENOTTY;
5633 
5634 	if (!pci_dev_trylock(dev))
5635 		return -EAGAIN;
5636 
5637 	pci_dev_save_and_disable(dev);
5638 	rc = __pci_reset_function_locked(dev);
5639 	pci_dev_restore(dev);
5640 	pci_dev_unlock(dev);
5641 
5642 	return rc;
5643 }
5644 EXPORT_SYMBOL_GPL(pci_try_reset_function);
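
/*
 * Sketch of retrying pci_try_reset_function() on lock contention (the
 * retry count and delay below are arbitrary):
 *
 *	for (i = 0; i < 5; i++) {
 *		rc = pci_try_reset_function(pdev);
 *		if (rc != -EAGAIN)
 *			break;
 *		msleep(20);
 *	}
 */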
5645 
5646 /* Do any devices on or below this bus prevent a bus reset? */
5647 static bool pci_bus_resettable(struct pci_bus *bus)
5648 {
5649 	struct pci_dev *dev;
5650 
5652 	if (bus->self && (bus->self->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET))
5653 		return false;
5654 
5655 	list_for_each_entry(dev, &bus->devices, bus_list) {
5656 		if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
5657 		    (dev->subordinate && !pci_bus_resettable(dev->subordinate)))
5658 			return false;
5659 	}
5660 
5661 	return true;
5662 }
5663 
5664 /* Lock devices from the top of the tree down */
5665 static void pci_bus_lock(struct pci_bus *bus)
5666 {
5667 	struct pci_dev *dev;
5668 
5669 	list_for_each_entry(dev, &bus->devices, bus_list) {
5670 		pci_dev_lock(dev);
5671 		if (dev->subordinate)
5672 			pci_bus_lock(dev->subordinate);
5673 	}
5674 }
5675 
5676 /* Unlock devices from the bottom of the tree up */
5677 static void pci_bus_unlock(struct pci_bus *bus)
5678 {
5679 	struct pci_dev *dev;
5680 
5681 	list_for_each_entry(dev, &bus->devices, bus_list) {
5682 		if (dev->subordinate)
5683 			pci_bus_unlock(dev->subordinate);
5684 		pci_dev_unlock(dev);
5685 	}
5686 }
5687 
5688 /* Return 1 on successful lock, 0 on contention */
5689 static int pci_bus_trylock(struct pci_bus *bus)
5690 {
5691 	struct pci_dev *dev;
5692 
5693 	list_for_each_entry(dev, &bus->devices, bus_list) {
5694 		if (!pci_dev_trylock(dev))
5695 			goto unlock;
5696 		if (dev->subordinate) {
5697 			if (!pci_bus_trylock(dev->subordinate)) {
5698 				pci_dev_unlock(dev);
5699 				goto unlock;
5700 			}
5701 		}
5702 	}
5703 	return 1;
5704 
5705 unlock:
5706 	list_for_each_entry_continue_reverse(dev, &bus->devices, bus_list) {
5707 		if (dev->subordinate)
5708 			pci_bus_unlock(dev->subordinate);
5709 		pci_dev_unlock(dev);
5710 	}
5711 	return 0;
5712 }
5713 
5714 /* Do any devices on or below this slot prevent a bus reset? */
5715 static bool pci_slot_resettable(struct pci_slot *slot)
5716 {
5717 	struct pci_dev *dev;
5718 
5719 	if (slot->bus->self &&
5720 	    (slot->bus->self->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET))
5721 		return false;
5722 
5723 	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5724 		if (!dev->slot || dev->slot != slot)
5725 			continue;
5726 		if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
5727 		    (dev->subordinate && !pci_bus_resettable(dev->subordinate)))
5728 			return false;
5729 	}
5730 
5731 	return true;
5732 }
5733 
5734 /* Lock devices from the top of the tree down */
5735 static void pci_slot_lock(struct pci_slot *slot)
5736 {
5737 	struct pci_dev *dev;
5738 
5739 	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5740 		if (!dev->slot || dev->slot != slot)
5741 			continue;
5742 		pci_dev_lock(dev);
5743 		if (dev->subordinate)
5744 			pci_bus_lock(dev->subordinate);
5745 	}
5746 }
5747 
5748 /* Unlock devices from the bottom of the tree up */
5749 static void pci_slot_unlock(struct pci_slot *slot)
5750 {
5751 	struct pci_dev *dev;
5752 
5753 	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5754 		if (!dev->slot || dev->slot != slot)
5755 			continue;
5756 		if (dev->subordinate)
5757 			pci_bus_unlock(dev->subordinate);
5758 		pci_dev_unlock(dev);
5759 	}
5760 }
5761 
5762 /* Return 1 on successful lock, 0 on contention */
5763 static int pci_slot_trylock(struct pci_slot *slot)
5764 {
5765 	struct pci_dev *dev;
5766 
5767 	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5768 		if (!dev->slot || dev->slot != slot)
5769 			continue;
5770 		if (!pci_dev_trylock(dev))
5771 			goto unlock;
5772 		if (dev->subordinate) {
5773 			if (!pci_bus_trylock(dev->subordinate)) {
5774 				pci_dev_unlock(dev);
5775 				goto unlock;
5776 			}
5777 		}
5778 	}
5779 	return 1;
5780 
5781 unlock:
5782 	list_for_each_entry_continue_reverse(dev,
5783 					     &slot->bus->devices, bus_list) {
5784 		if (!dev->slot || dev->slot != slot)
5785 			continue;
5786 		if (dev->subordinate)
5787 			pci_bus_unlock(dev->subordinate);
5788 		pci_dev_unlock(dev);
5789 	}
5790 	return 0;
5791 }
5792 
5793 /*
5794  * Save and disable devices from the top of the tree down while holding
5795  * the @dev mutex lock for the entire tree.
5796  */
5797 static void pci_bus_save_and_disable_locked(struct pci_bus *bus)
5798 {
5799 	struct pci_dev *dev;
5800 
5801 	list_for_each_entry(dev, &bus->devices, bus_list) {
5802 		pci_dev_save_and_disable(dev);
5803 		if (dev->subordinate)
5804 			pci_bus_save_and_disable_locked(dev->subordinate);
5805 	}
5806 }
5807 
5808 /*
5809  * Restore devices from top of the tree down while holding @dev mutex lock
5810  * for the entire tree.  Parent bridges need to be restored before we can
5811  * get to subordinate devices.
5812  */
5813 static void pci_bus_restore_locked(struct pci_bus *bus)
5814 {
5815 	struct pci_dev *dev;
5816 
5817 	list_for_each_entry(dev, &bus->devices, bus_list) {
5818 		pci_dev_restore(dev);
5819 		if (dev->subordinate)
5820 			pci_bus_restore_locked(dev->subordinate);
5821 	}
5822 }
5823 
5824 /*
5825  * Save and disable devices from the top of the tree down while holding
5826  * the @dev mutex lock for the entire tree.
5827  */
5828 static void pci_slot_save_and_disable_locked(struct pci_slot *slot)
5829 {
5830 	struct pci_dev *dev;
5831 
5832 	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5833 		if (!dev->slot || dev->slot != slot)
5834 			continue;
5835 		pci_dev_save_and_disable(dev);
5836 		if (dev->subordinate)
5837 			pci_bus_save_and_disable_locked(dev->subordinate);
5838 	}
5839 }
5840 
5841 /*
5842  * Restore devices from top of the tree down while holding @dev mutex lock
5843  * for the entire tree.  Parent bridges need to be restored before we can
5844  * get to subordinate devices.
5845  */
5846 static void pci_slot_restore_locked(struct pci_slot *slot)
5847 {
5848 	struct pci_dev *dev;
5849 
5850 	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5851 		if (!dev->slot || dev->slot != slot)
5852 			continue;
5853 		pci_dev_restore(dev);
5854 		if (dev->subordinate)
5855 			pci_bus_restore_locked(dev->subordinate);
5856 	}
5857 }
5858 
5859 static int pci_slot_reset(struct pci_slot *slot, bool probe)
5860 {
5861 	int rc;
5862 
5863 	if (!slot || !pci_slot_resettable(slot))
5864 		return -ENOTTY;
5865 
5866 	if (!probe)
5867 		pci_slot_lock(slot);
5868 
5869 	might_sleep();
5870 
5871 	rc = pci_reset_hotplug_slot(slot->hotplug, probe);
5872 
5873 	if (!probe)
5874 		pci_slot_unlock(slot);
5875 
5876 	return rc;
5877 }
5878 
5879 /**
5880  * pci_probe_reset_slot - probe whether a PCI slot can be reset
5881  * @slot: PCI slot to probe
5882  *
5883  * Return 0 if slot can be reset, negative if a slot reset is not supported.
5884  */
5885 int pci_probe_reset_slot(struct pci_slot *slot)
5886 {
5887 	return pci_slot_reset(slot, PCI_RESET_PROBE);
5888 }
5889 EXPORT_SYMBOL_GPL(pci_probe_reset_slot);
5890 
5891 /**
5892  * __pci_reset_slot - Try to reset a PCI slot
5893  * @slot: PCI slot to reset
5894  *
5895  * A PCI bus may host multiple slots; each slot may support a reset mechanism
5896  * independent of other slots.  For instance, some slots may support slot power
5897  * control.  In the case of a 1:1 bus-to-slot architecture, this function may
5898  * wrap the bus reset to avoid spurious slot-related events such as hotplug.
5899  * Generally a slot reset should be attempted before a bus reset.  All of the
5900  * functions of the slot and any subordinate buses behind the slot are reset
5901  * through this function.  PCI config space of all devices in the slot and
5902  * behind the slot is saved before and restored after reset.
5903  *
5904  * Same as above except return -EAGAIN if the slot cannot be locked
5905  */
5906 static int __pci_reset_slot(struct pci_slot *slot)
5907 {
5908 	int rc;
5909 
5910 	rc = pci_slot_reset(slot, PCI_RESET_PROBE);
5911 	if (rc)
5912 		return rc;
5913 
5914 	if (pci_slot_trylock(slot)) {
5915 		pci_slot_save_and_disable_locked(slot);
5916 		might_sleep();
5917 		rc = pci_reset_hotplug_slot(slot->hotplug, PCI_RESET_DO_RESET);
5918 		pci_slot_restore_locked(slot);
5919 		pci_slot_unlock(slot);
5920 	} else
5921 		rc = -EAGAIN;
5922 
5923 	return rc;
5924 }
5925 
5926 static int pci_bus_reset(struct pci_bus *bus, bool probe)
5927 {
5928 	int ret;
5929 
5930 	if (!bus->self || !pci_bus_resettable(bus))
5931 		return -ENOTTY;
5932 
5933 	if (probe)
5934 		return 0;
5935 
5936 	pci_bus_lock(bus);
5937 
5938 	might_sleep();
5939 
5940 	ret = pci_bridge_secondary_bus_reset(bus->self);
5941 
5942 	pci_bus_unlock(bus);
5943 
5944 	return ret;
5945 }
5946 
5947 /**
5948  * pci_bus_error_reset - reset the bridge's subordinate bus
5949  * @bridge: The parent device that connects to the bus to reset
5950  *
5951  * This function will first try to reset the slots on this bus if the method is
5952  * available. If slot reset fails or is not available, this will fall back to a
5953  * secondary bus reset.
5954  */
5955 int pci_bus_error_reset(struct pci_dev *bridge)
5956 {
5957 	struct pci_bus *bus = bridge->subordinate;
5958 	struct pci_slot *slot;
5959 
5960 	if (!bus)
5961 		return -ENOTTY;
5962 
5963 	mutex_lock(&pci_slot_mutex);
5964 	if (list_empty(&bus->slots))
5965 		goto bus_reset;
5966 
5967 	list_for_each_entry(slot, &bus->slots, list)
5968 		if (pci_probe_reset_slot(slot))
5969 			goto bus_reset;
5970 
5971 	list_for_each_entry(slot, &bus->slots, list)
5972 		if (pci_slot_reset(slot, PCI_RESET_DO_RESET))
5973 			goto bus_reset;
5974 
5975 	mutex_unlock(&pci_slot_mutex);
5976 	return 0;
5977 bus_reset:
5978 	mutex_unlock(&pci_slot_mutex);
5979 	return pci_bus_reset(bridge->subordinate, PCI_RESET_DO_RESET);
5980 }
5981 
5982 /**
5983  * pci_probe_reset_bus - probe whether a PCI bus can be reset
5984  * @bus: PCI bus to probe
5985  *
5986  * Return 0 if bus can be reset, negative if a bus reset is not supported.
5987  */
5988 int pci_probe_reset_bus(struct pci_bus *bus)
5989 {
5990 	return pci_bus_reset(bus, PCI_RESET_PROBE);
5991 }
5992 EXPORT_SYMBOL_GPL(pci_probe_reset_bus);
5993 
5994 /**
5995  * __pci_reset_bus - Try to reset a PCI bus
5996  * @bus: top level PCI bus to reset
5997  *
5998  * Same as above except return -EAGAIN if the bus cannot be locked
5999  */
6000 static int __pci_reset_bus(struct pci_bus *bus)
6001 {
6002 	int rc;
6003 
6004 	rc = pci_bus_reset(bus, PCI_RESET_PROBE);
6005 	if (rc)
6006 		return rc;
6007 
6008 	if (pci_bus_trylock(bus)) {
6009 		pci_bus_save_and_disable_locked(bus);
6010 		might_sleep();
6011 		rc = pci_bridge_secondary_bus_reset(bus->self);
6012 		pci_bus_restore_locked(bus);
6013 		pci_bus_unlock(bus);
6014 	} else
6015 		rc = -EAGAIN;
6016 
6017 	return rc;
6018 }
6019 
6020 /**
6021  * pci_reset_bus - Try to reset a PCI bus
6022  * @pdev: top level PCI device to reset via slot/bus
6023  *
6024  * Same as above except return -EAGAIN if the bus cannot be locked
6025  */
6026 int pci_reset_bus(struct pci_dev *pdev)
6027 {
6028 	return (!pci_probe_reset_slot(pdev->slot)) ?
6029 	    __pci_reset_slot(pdev->slot) : __pci_reset_bus(pdev->bus);
6030 }
6031 EXPORT_SYMBOL_GPL(pci_reset_bus);
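
/*
 * Sketch: a caller that cannot reset an individual function (e.g. because
 * FLR is unsupported) may fall back to a slot or bus reset, which affects
 * every function below the bridge:
 *
 *	rc = pci_reset_bus(pdev);
 *	if (rc == -EAGAIN)
 *		pci_warn(pdev, "bus is busy, try again later\n");
 */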
6032 
6033 /**
6034  * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count
6035  * @dev: PCI device to query
6036  *
6037  * Returns mmrbc: maximum designed memory read count in bytes or
6038  * appropriate error value.
6039  */
6040 int pcix_get_max_mmrbc(struct pci_dev *dev)
6041 {
6042 	int cap;
6043 	u32 stat;
6044 
6045 	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
6046 	if (!cap)
6047 		return -EINVAL;
6048 
6049 	if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
6050 		return -EINVAL;
6051 
6052 	return 512 << FIELD_GET(PCI_X_STATUS_MAX_READ, stat);
6053 }
6054 EXPORT_SYMBOL(pcix_get_max_mmrbc);
6055 
6056 /**
6057  * pcix_get_mmrbc - get PCI-X maximum memory read byte count
6058  * @dev: PCI device to query
6059  *
6060  * Returns mmrbc: maximum memory read count in bytes or appropriate error
6061  * value.
6062  */
6063 int pcix_get_mmrbc(struct pci_dev *dev)
6064 {
6065 	int cap;
6066 	u16 cmd;
6067 
6068 	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
6069 	if (!cap)
6070 		return -EINVAL;
6071 
6072 	if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
6073 		return -EINVAL;
6074 
6075 	return 512 << FIELD_GET(PCI_X_CMD_MAX_READ, cmd);
6076 }
6077 EXPORT_SYMBOL(pcix_get_mmrbc);
6078 
6079 /**
6080  * pcix_set_mmrbc - set PCI-X maximum memory read byte count
6081  * @dev: PCI device to configure
6082  * @mmrbc: maximum memory read count in bytes
6083  *    valid values are 512, 1024, 2048, 4096
6084  *
6085  * If possible, sets the maximum memory read byte count; some bridges have
6086  * errata that prevent this.
6087  */
6088 int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
6089 {
6090 	int cap;
6091 	u32 stat, v, o;
6092 	u16 cmd;
6093 
6094 	if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc))
6095 		return -EINVAL;
6096 
6097 	v = ffs(mmrbc) - 10;
6098 
6099 	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
6100 	if (!cap)
6101 		return -EINVAL;
6102 
6103 	if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
6104 		return -EINVAL;
6105 
6106 	if (v > FIELD_GET(PCI_X_STATUS_MAX_READ, stat))
6107 		return -E2BIG;
6108 
6109 	if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
6110 		return -EINVAL;
6111 
6112 	o = FIELD_GET(PCI_X_CMD_MAX_READ, cmd);
6113 	if (o != v) {
6114 		if (v > o && (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC))
6115 			return -EIO;
6116 
6117 		cmd &= ~PCI_X_CMD_MAX_READ;
6118 		cmd |= FIELD_PREP(PCI_X_CMD_MAX_READ, v);
6119 		if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd))
6120 			return -EIO;
6121 	}
6122 	return 0;
6123 }
6124 EXPORT_SYMBOL(pcix_set_mmrbc);
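
/*
 * Worked example for the encoding above: mmrbc = 2048 is a power of two
 * in [512, 4096]; ffs(2048) = 12, so v = 12 - 10 = 2, which matches the
 * decoding 512 << 2 = 2048 used by pcix_get_mmrbc().
 */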
6125 
6126 /**
6127  * pcie_get_readrq - get PCI Express read request size
6128  * @dev: PCI device to query
6129  *
6130  * Returns maximum memory read request in bytes or appropriate error value.
6131  */
6132 int pcie_get_readrq(struct pci_dev *dev)
6133 {
6134 	u16 ctl;
6135 
6136 	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
6137 
6138 	return 128 << FIELD_GET(PCI_EXP_DEVCTL_READRQ, ctl);
6139 }
6140 EXPORT_SYMBOL(pcie_get_readrq);
6141 
6142 /**
6143  * pcie_set_readrq - set PCI Express maximum memory read request
6144  * @dev: PCI device to configure
6145  * @rq: maximum memory read count in bytes
6146  *    valid values are 128, 256, 512, 1024, 2048, 4096
6147  *
6148  * If possible, sets the maximum memory read request size in bytes.
6149  */
6150 int pcie_set_readrq(struct pci_dev *dev, int rq)
6151 {
6152 	u16 v;
6153 	int ret;
6154 	struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);
6155 
6156 	if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
6157 		return -EINVAL;
6158 
6159 	/*
6160 	 * If using the "performance" PCIe config, we clamp the read rq
6161 	 * size to the max packet size to keep the host bridge from
6162 	 * generating requests larger than we can cope with.
6163 	 */
6164 	if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
6165 		int mps = pcie_get_mps(dev);
6166 
6167 		if (mps < rq)
6168 			rq = mps;
6169 	}
6170 
6171 	v = FIELD_PREP(PCI_EXP_DEVCTL_READRQ, ffs(rq) - 8);
6172 
6173 	if (bridge->no_inc_mrrs) {
6174 		int max_mrrs = pcie_get_readrq(dev);
6175 
6176 		if (rq > max_mrrs) {
6177 			pci_info(dev, "can't set Max_Read_Request_Size to %d; max is %d\n", rq, max_mrrs);
6178 			return -EINVAL;
6179 		}
6180 	}
6181 
6182 	ret = pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
6183 						  PCI_EXP_DEVCTL_READRQ, v);
6184 
6185 	return pcibios_err_to_errno(ret);
6186 }
6187 EXPORT_SYMBOL(pcie_set_readrq);
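
/*
 * Example (sketch): clamp a device's MRRS to 256 bytes, e.g. to work
 * around a hypothetical erratum:
 *
 *	rc = pcie_set_readrq(pdev, 256);
 *	if (rc)
 *		pci_warn(pdev, "failed to set MRRS: %d\n", rc);
 *
 * The encoding mirrors pcie_get_readrq(): ffs(256) - 8 = 1 is written to
 * PCI_EXP_DEVCTL_READRQ, and 128 << 1 = 256 decodes it.
 */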
6188 
6189 /**
6190  * pcie_get_mps - get PCI Express maximum payload size
6191  * @dev: PCI device to query
6192  *
6193  * Returns maximum payload size in bytes
6194  */
6195 int pcie_get_mps(struct pci_dev *dev)
6196 {
6197 	u16 ctl;
6198 
6199 	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
6200 
6201 	return 128 << FIELD_GET(PCI_EXP_DEVCTL_PAYLOAD, ctl);
6202 }
6203 EXPORT_SYMBOL(pcie_get_mps);
6204 
6205 /**
6206  * pcie_set_mps - set PCI Express maximum payload size
6207  * @dev: PCI device to configure
6208  * @mps: maximum payload size in bytes
6209  *    valid values are 128, 256, 512, 1024, 2048, 4096
6210  *
6211  * If possible, sets the maximum payload size.
6212  */
6213 int pcie_set_mps(struct pci_dev *dev, int mps)
6214 {
6215 	u16 v;
6216 	int ret;
6217 
6218 	if (mps < 128 || mps > 4096 || !is_power_of_2(mps))
6219 		return -EINVAL;
6220 
6221 	v = ffs(mps) - 8;
6222 	if (v > dev->pcie_mpss)
6223 		return -EINVAL;
6224 	v = FIELD_PREP(PCI_EXP_DEVCTL_PAYLOAD, v);
6225 
6226 	ret = pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
6227 						  PCI_EXP_DEVCTL_PAYLOAD, v);
6228 
6229 	return pcibios_err_to_errno(ret);
6230 }
6231 EXPORT_SYMBOL(pcie_set_mps);
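
/*
 * Worked example for the encoding above: mps = 512 gives
 * v = ffs(512) - 8 = 2; the call fails with -EINVAL if 2 exceeds the
 * device's advertised Max_Payload_Size_Supported (dev->pcie_mpss).
 */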
6232 
6233 /**
6234  * pcie_bandwidth_available - determine minimum link settings of a PCIe
6235  *			      device and its bandwidth limitation
6236  * @dev: PCI device to query
6237  * @limiting_dev: storage for device causing the bandwidth limitation
6238  * @speed: storage for speed of limiting device
6239  * @width: storage for width of limiting device
6240  *
6241  * Walk up the PCI device chain and find the point where the minimum
6242  * bandwidth is available.  Return the bandwidth available there and (if
6243  * limiting_dev, speed, and width pointers are supplied) information about
6244  * that point.  The bandwidth returned is in Mb/s, i.e., megabits/second of
6245  * raw bandwidth.
6246  */
6247 u32 pcie_bandwidth_available(struct pci_dev *dev, struct pci_dev **limiting_dev,
6248 			     enum pci_bus_speed *speed,
6249 			     enum pcie_link_width *width)
6250 {
6251 	u16 lnksta;
6252 	enum pci_bus_speed next_speed;
6253 	enum pcie_link_width next_width;
6254 	u32 bw, next_bw;
6255 
6256 	if (speed)
6257 		*speed = PCI_SPEED_UNKNOWN;
6258 	if (width)
6259 		*width = PCIE_LNK_WIDTH_UNKNOWN;
6260 
6261 	bw = 0;
6262 
6263 	while (dev) {
6264 		pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
6265 
6266 		next_speed = pcie_link_speed[FIELD_GET(PCI_EXP_LNKSTA_CLS,
6267 						       lnksta)];
6268 		next_width = FIELD_GET(PCI_EXP_LNKSTA_NLW, lnksta);
6269 
6270 		next_bw = next_width * PCIE_SPEED2MBS_ENC(next_speed);
6271 
6272 		/* Check if current device limits the total bandwidth */
6273 		if (!bw || next_bw <= bw) {
6274 			bw = next_bw;
6275 
6276 			if (limiting_dev)
6277 				*limiting_dev = dev;
6278 			if (speed)
6279 				*speed = next_speed;
6280 			if (width)
6281 				*width = next_width;
6282 		}
6283 
6284 		dev = pci_upstream_bridge(dev);
6285 	}
6286 
6287 	return bw;
6288 }
6289 EXPORT_SYMBOL(pcie_bandwidth_available);
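
/*
 * Example (sketch): report the upstream bottleneck for a device:
 *
 *	enum pci_bus_speed speed;
 *	enum pcie_link_width width;
 *	struct pci_dev *limit = NULL;
 *	u32 bw = pcie_bandwidth_available(pdev, &limit, &speed, &width);
 *
 *	pci_info(pdev, "%u Mb/s available, limited by %s\n",
 *		 bw, limit ? pci_name(limit) : "<unknown>");
 */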
6290 
6291 /**
6292  * pcie_get_speed_cap - query for the PCI device's link speed capability
6293  * @dev: PCI device to query
6294  *
6295  * Query the PCI device speed capability.  Return the maximum link speed
6296  * supported by the device.
6297  */
6298 enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev)
6299 {
6300 	u32 lnkcap2, lnkcap;
6301 
6302 	/*
6303 	 * Link Capabilities 2 was added in PCIe r3.0, sec 7.8.18.  The
6304 	 * implementation note there recommends using the Supported Link
6305 	 * Speeds Vector in Link Capabilities 2 when supported.
6306 	 *
6307 	 * Without Link Capabilities 2, i.e., prior to PCIe r3.0, software
6308 	 * should use the Supported Link Speeds field in Link Capabilities,
6309 	 * where only 2.5 GT/s and 5.0 GT/s speeds were defined.
6310 	 */
6311 	pcie_capability_read_dword(dev, PCI_EXP_LNKCAP2, &lnkcap2);
6312 
6313 	/* PCIe r3.0-compliant */
6314 	if (lnkcap2)
6315 		return PCIE_LNKCAP2_SLS2SPEED(lnkcap2);
6316 
6317 	pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
6318 	if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_5_0GB)
6319 		return PCIE_SPEED_5_0GT;
6320 	else if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_2_5GB)
6321 		return PCIE_SPEED_2_5GT;
6322 
6323 	return PCI_SPEED_UNKNOWN;
6324 }
6325 EXPORT_SYMBOL(pcie_get_speed_cap);
6326 
6327 /**
6328  * pcie_get_width_cap - query for the PCI device's link width capability
6329  * @dev: PCI device to query
6330  *
6331  * Query the PCI device width capability.  Return the maximum link width
6332  * supported by the device.
6333  */
6334 enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev)
6335 {
6336 	u32 lnkcap;
6337 
6338 	pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
6339 	if (lnkcap)
6340 		return FIELD_GET(PCI_EXP_LNKCAP_MLW, lnkcap);
6341 
6342 	return PCIE_LNK_WIDTH_UNKNOWN;
6343 }
6344 EXPORT_SYMBOL(pcie_get_width_cap);
6345 
6346 /**
6347  * pcie_bandwidth_capable - calculate a PCI device's link bandwidth capability
6348  * @dev: PCI device
6349  * @speed: storage for link speed
6350  * @width: storage for link width
6351  *
6352  * Calculate a PCI device's link bandwidth by querying for its link speed
6353  * and width, multiplying them, and applying encoding overhead.  The result
6354  * is in Mb/s, i.e., megabits/second of raw bandwidth.
6355  */
6356 u32 pcie_bandwidth_capable(struct pci_dev *dev, enum pci_bus_speed *speed,
6357 			   enum pcie_link_width *width)
6358 {
6359 	*speed = pcie_get_speed_cap(dev);
6360 	*width = pcie_get_width_cap(dev);
6361 
6362 	if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN)
6363 		return 0;
6364 
6365 	return *width * PCIE_SPEED2MBS_ENC(*speed);
6366 }
6367 
6368 /**
6369  * __pcie_print_link_status - Report the PCI device's link speed and width
6370  * @dev: PCI device to query
6371  * @verbose: Print info even when enough bandwidth is available
6372  *
6373  * If the available bandwidth at the device is less than the device is
6374  * capable of, report the device's maximum possible bandwidth and the
6375  * upstream link that limits its performance.  If @verbose, always print
6376  * the available bandwidth, even if the device isn't constrained.
6377  */
6378 void __pcie_print_link_status(struct pci_dev *dev, bool verbose)
6379 {
6380 	enum pcie_link_width width, width_cap;
6381 	enum pci_bus_speed speed, speed_cap;
6382 	struct pci_dev *limiting_dev = NULL;
6383 	u32 bw_avail, bw_cap;
6384 
6385 	bw_cap = pcie_bandwidth_capable(dev, &speed_cap, &width_cap);
6386 	bw_avail = pcie_bandwidth_available(dev, &limiting_dev, &speed, &width);
6387 
6388 	if (bw_avail >= bw_cap && verbose)
6389 		pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth (%s x%d link)\n",
6390 			 bw_cap / 1000, bw_cap % 1000,
6391 			 pci_speed_string(speed_cap), width_cap);
6392 	else if (bw_avail < bw_cap)
6393 		pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth, limited by %s x%d link at %s (capable of %u.%03u Gb/s with %s x%d link)\n",
6394 			 bw_avail / 1000, bw_avail % 1000,
6395 			 pci_speed_string(speed), width,
6396 			 limiting_dev ? pci_name(limiting_dev) : "<unknown>",
6397 			 bw_cap / 1000, bw_cap % 1000,
6398 			 pci_speed_string(speed_cap), width_cap);
6399 }
6400 
6401 /**
6402  * pcie_print_link_status - Report the PCI device's link speed and width
6403  * @dev: PCI device to query
6404  *
6405  * Report the available bandwidth at the device.
6406  */
6407 void pcie_print_link_status(struct pci_dev *dev)
6408 {
6409 	__pcie_print_link_status(dev, true);
6410 }
6411 EXPORT_SYMBOL(pcie_print_link_status);
6412 
6413 /**
6414  * pci_select_bars - Make BAR mask from the type of resource
6415  * @dev: the PCI device for which BAR mask is made
6416  * @flags: resource type mask to be selected
6417  *
6418  * This helper routine makes a BAR mask from the given resource type.
6419  */
6420 int pci_select_bars(struct pci_dev *dev, unsigned long flags)
6421 {
6422 	int i, bars = 0;
6423 	for (i = 0; i < PCI_NUM_RESOURCES; i++)
6424 		if (pci_resource_flags(dev, i) & flags)
6425 			bars |= (1 << i);
6426 	return bars;
6427 }
6428 EXPORT_SYMBOL(pci_select_bars);
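
/*
 * Example (sketch): request only the MMIO BARs of a device ("my_driver"
 * is a placeholder name):
 *
 *	int bars = pci_select_bars(pdev, IORESOURCE_MEM);
 *
 *	rc = pci_request_selected_regions(pdev, bars, "my_driver");
 */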
6429 
6430 /* Some architectures require additional programming to enable VGA */
6431 static arch_set_vga_state_t arch_set_vga_state;
6432 
6433 void __init pci_register_set_vga_state(arch_set_vga_state_t func)
6434 {
6435 	arch_set_vga_state = func;	/* NULL disables */
6436 }
6437 
6438 static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
6439 				  unsigned int command_bits, u32 flags)
6440 {
6441 	if (arch_set_vga_state)
6442 		return arch_set_vga_state(dev, decode, command_bits,
6443 						flags);
6444 	return 0;
6445 }
6446 
6447 /**
6448  * pci_set_vga_state - set VGA decode state on device and parents if requested
6449  * @dev: the PCI device
6450  * @decode: true = enable decoding, false = disable decoding
6451  * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY
6452  * @flags: PCI_VGA_STATE_CHANGE_DECODES and/or PCI_VGA_STATE_CHANGE_BRIDGE;
6453  *	the latter also traverses ancestors and updates bridge VGA forwarding
6454  */
6455 int pci_set_vga_state(struct pci_dev *dev, bool decode,
6456 		      unsigned int command_bits, u32 flags)
6457 {
6458 	struct pci_bus *bus;
6459 	struct pci_dev *bridge;
6460 	u16 cmd;
6461 	int rc;
6462 
6463 	WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) && (command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY)));
6464 
6465 	/* ARCH specific VGA enables */
6466 	rc = pci_set_vga_state_arch(dev, decode, command_bits, flags);
6467 	if (rc)
6468 		return rc;
6469 
6470 	if (flags & PCI_VGA_STATE_CHANGE_DECODES) {
6471 		pci_read_config_word(dev, PCI_COMMAND, &cmd);
6472 		if (decode)
6473 			cmd |= command_bits;
6474 		else
6475 			cmd &= ~command_bits;
6476 		pci_write_config_word(dev, PCI_COMMAND, cmd);
6477 	}
6478 
6479 	if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE))
6480 		return 0;
6481 
6482 	bus = dev->bus;
6483 	while (bus) {
6484 		bridge = bus->self;
6485 		if (bridge) {
6486 			pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
6487 					     &cmd);
6488 			if (decode)
6489 				cmd |= PCI_BRIDGE_CTL_VGA;
6490 			else
6491 				cmd &= ~PCI_BRIDGE_CTL_VGA;
6492 			pci_write_config_word(bridge, PCI_BRIDGE_CONTROL,
6493 					      cmd);
6494 		}
6495 		bus = bus->parent;
6496 	}
6497 	return 0;
6498 }
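
/*
 * Example (sketch): disable legacy VGA decoding on a device and update
 * the VGA forwarding of the bridges above it, in the style of the VGA
 * arbiter:
 *
 *	pci_set_vga_state(pdev, false,
 *			  PCI_COMMAND_IO | PCI_COMMAND_MEMORY,
 *			  PCI_VGA_STATE_CHANGE_DECODES |
 *			  PCI_VGA_STATE_CHANGE_BRIDGE);
 */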
6499 
6500 #ifdef CONFIG_ACPI
6501 bool pci_pr3_present(struct pci_dev *pdev)
6502 {
6503 	struct acpi_device *adev;
6504 
6505 	if (acpi_disabled)
6506 		return false;
6507 
6508 	adev = ACPI_COMPANION(&pdev->dev);
6509 	if (!adev)
6510 		return false;
6511 
6512 	return adev->power.flags.power_resources &&
6513 		acpi_has_method(adev->handle, "_PR3");
6514 }
6515 EXPORT_SYMBOL_GPL(pci_pr3_present);
6516 #endif
6517 
6518 /**
6519  * pci_add_dma_alias - Add a DMA devfn alias for a device
6520  * @dev: the PCI device for which alias is added
6521  * @devfn_from: alias slot and function
6522  * @nr_devfns: number of subsequent devfns to alias
6523  *
6524  * This helper encodes an 8-bit devfn as a bit number in dma_alias_mask
6525  * which is used to program permissible bus-devfn source addresses for DMA
6526  * requests in an IOMMU.  These aliases factor into IOMMU group creation
6527  * and are useful for devices generating DMA requests beyond or different
6528  * from their logical bus-devfn.  Examples include device quirks where the
6529  * device simply uses the wrong devfn, as well as non-transparent bridges
6530  * where the alias may be a proxy for devices in another domain.
6531  *
6532  * IOMMU group creation is performed during device discovery or addition,
6533  * prior to any potential DMA mapping and therefore prior to driver probing
6534  * (especially for userspace assigned devices where IOMMU group definition
6535  * cannot be left as a userspace activity).  DMA aliases should therefore
6536  * be configured via quirks, such as the PCI fixup header quirk.
6537  */
6538 void pci_add_dma_alias(struct pci_dev *dev, u8 devfn_from,
6539 		       unsigned int nr_devfns)
6540 {
6541 	int devfn_to;
6542 
6543 	nr_devfns = min(nr_devfns, (unsigned int)MAX_NR_DEVFNS - devfn_from);
6544 	devfn_to = devfn_from + nr_devfns - 1;
6545 
6546 	if (!dev->dma_alias_mask)
6547 		dev->dma_alias_mask = bitmap_zalloc(MAX_NR_DEVFNS, GFP_KERNEL);
6548 	if (!dev->dma_alias_mask) {
6549 		pci_warn(dev, "Unable to allocate DMA alias mask\n");
6550 		return;
6551 	}
6552 
6553 	bitmap_set(dev->dma_alias_mask, devfn_from, nr_devfns);
6554 
6555 	if (nr_devfns == 1)
6556 		pci_info(dev, "Enabling fixed DMA alias to %02x.%d\n",
6557 				PCI_SLOT(devfn_from), PCI_FUNC(devfn_from));
6558 	else if (nr_devfns > 1)
6559 		pci_info(dev, "Enabling fixed DMA alias for devfn range from %02x.%d to %02x.%d\n",
6560 				PCI_SLOT(devfn_from), PCI_FUNC(devfn_from),
6561 				PCI_SLOT(devfn_to), PCI_FUNC(devfn_to));
6562 }
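
/*
 * Example (sketch): a header fixup quirk for a device that issues DMA
 * with function 0's devfn (the vendor/device IDs are placeholders):
 *
 *	static void quirk_dma_alias_func0(struct pci_dev *dev)
 *	{
 *		if (PCI_FUNC(dev->devfn))
 *			pci_add_dma_alias(dev,
 *					  PCI_DEVFN(PCI_SLOT(dev->devfn), 0), 1);
 *	}
 *	DECLARE_PCI_FIXUP_HEADER(0x1234, 0x5678, quirk_dma_alias_func0);
 */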
6563 
6564 bool pci_devs_are_dma_aliases(struct pci_dev *dev1, struct pci_dev *dev2)
6565 {
6566 	return (dev1->dma_alias_mask &&
6567 		test_bit(dev2->devfn, dev1->dma_alias_mask)) ||
6568 	       (dev2->dma_alias_mask &&
6569 		test_bit(dev1->devfn, dev2->dma_alias_mask)) ||
6570 	       pci_real_dma_dev(dev1) == dev2 ||
6571 	       pci_real_dma_dev(dev2) == dev1;
6572 }
6573 
6574 bool pci_device_is_present(struct pci_dev *pdev)
6575 {
6576 	u32 v;
6577 
6578 	/* Check PF if pdev is a VF, since VF Vendor/Device IDs are 0xffff */
6579 	pdev = pci_physfn(pdev);
6580 	if (pci_dev_is_disconnected(pdev))
6581 		return false;
6582 	return pci_bus_read_dev_vendor_id(pdev->bus, pdev->devfn, &v, 0);
6583 }
6584 EXPORT_SYMBOL_GPL(pci_device_is_present);
6585 
6586 void pci_ignore_hotplug(struct pci_dev *dev)
6587 {
6588 	struct pci_dev *bridge = dev->bus->self;
6589 
6590 	dev->ignore_hotplug = 1;
6591 	/* Propagate the "ignore hotplug" setting to the parent bridge. */
6592 	if (bridge)
6593 		bridge->ignore_hotplug = 1;
6594 }
6595 EXPORT_SYMBOL_GPL(pci_ignore_hotplug);
6596 
6597 /**
6598  * pci_real_dma_dev - Get PCI DMA device for PCI device
6599  * @dev: the PCI device that may have a PCI DMA alias
6600  *
6601  * Permits the platform to provide architecture-specific functionality to
6602  * devices needing to alias DMA to another PCI device on another PCI bus. If
6603  * the PCI device is on the same bus, it is recommended to use
6604  * pci_add_dma_alias(). This is the default implementation. Architecture
6605  * implementations can override this.
6606  */
6607 struct pci_dev __weak *pci_real_dma_dev(struct pci_dev *dev)
6608 {
6609 	return dev;
6610 }
6611 
6612 resource_size_t __weak pcibios_default_alignment(void)
6613 {
6614 	return 0;
6615 }
6616 
6617 /*
6618  * Arches that don't want to expose struct resource to userland as-is in
6619  * sysfs and /proc can implement their own pci_resource_to_user().
6620  */
6621 void __weak pci_resource_to_user(const struct pci_dev *dev, int bar,
6622 				 const struct resource *rsrc,
6623 				 resource_size_t *start, resource_size_t *end)
6624 {
6625 	*start = rsrc->start;
6626 	*end = rsrc->end;
6627 }
6628 
6629 static char *resource_alignment_param;
6630 static DEFINE_SPINLOCK(resource_alignment_lock);
6631 
6632 /**
6633  * pci_specified_resource_alignment - get resource alignment specified by user.
6634  * @dev: the PCI device to get
6635  * @resize: whether or not to change resources' size when reassigning alignment
6636  *
6637  * RETURNS: Resource alignment if it is specified.
6638  *          Zero if it is not specified.
6639  */
6640 static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev,
6641 							bool *resize)
6642 {
6643 	int align_order, count;
6644 	resource_size_t align = pcibios_default_alignment();
6645 	const char *p;
6646 	int ret;
6647 
6648 	spin_lock(&resource_alignment_lock);
6649 	p = resource_alignment_param;
6650 	if (!p || !*p)
6651 		goto out;
6652 	if (pci_has_flag(PCI_PROBE_ONLY)) {
6653 		align = 0;
6654 		pr_info_once("PCI: Ignoring requested alignments (PCI_PROBE_ONLY)\n");
6655 		goto out;
6656 	}
6657 
6658 	while (*p) {
6659 		count = 0;
6660 		if (sscanf(p, "%d%n", &align_order, &count) == 1 &&
6661 		    p[count] == '@') {
6662 			p += count + 1;
6663 			if (align_order > 63) {
6664 				pr_err("PCI: Invalid requested alignment (order %d)\n",
6665 				       align_order);
6666 				align_order = PAGE_SHIFT;
6667 			}
6668 		} else {
6669 			align_order = PAGE_SHIFT;
6670 		}
6671 
6672 		ret = pci_dev_str_match(dev, p, &p);
6673 		if (ret == 1) {
6674 			*resize = true;
6675 			align = 1ULL << align_order;
6676 			break;
6677 		} else if (ret < 0) {
6678 			pr_err("PCI: Can't parse resource_alignment parameter: %s\n",
6679 			       p);
6680 			break;
6681 		}
6682 
6683 		if (*p != ';' && *p != ',') {
6684 			/* End of param or invalid format */
6685 			break;
6686 		}
6687 		p++;
6688 	}
6689 out:
6690 	spin_unlock(&resource_alignment_lock);
6691 	return align;
6692 }
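
/*
 * Example "pci=resource_alignment=" strings parsed above (the device
 * address is a placeholder): "12@00:01.0" requests 2^12 (4 KiB) alignment
 * for device 00:01.0; without an "<order>@" prefix, PAGE_SHIFT is
 * assumed.  Multiple entries are separated by ';' or ','.
 */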
6693 
6694 static void pci_request_resource_alignment(struct pci_dev *dev, int bar,
6695 					   resource_size_t align, bool resize)
6696 {
6697 	struct resource *r = &dev->resource[bar];
6698 	resource_size_t size;
6699 
6700 	if (!(r->flags & IORESOURCE_MEM))
6701 		return;
6702 
6703 	if (r->flags & IORESOURCE_PCI_FIXED) {
6704 		pci_info(dev, "BAR%d %pR: ignoring requested alignment %#llx\n",
6705 			 bar, r, (unsigned long long)align);
6706 		return;
6707 	}
6708 
6709 	size = resource_size(r);
6710 	if (size >= align)
6711 		return;
6712 
6713 	/*
6714 	 * Increase the alignment of the resource.  There are two ways we
6715 	 * can do this:
6716 	 *
6717 	 * 1) Increase the size of the resource.  BARs are aligned on their
6718 	 *    size, so when we reallocate space for this resource, we'll
6719 	 *    allocate it with the larger alignment.  This also prevents
6720 	 *    assignment of any other BARs inside the alignment region, so
6721 	 *    if we're requesting page alignment, this means no other BARs
6722 	 *    will share the page.
6723 	 *
6724 	 *    The disadvantage is that this makes the resource larger than
6725 	 *    the hardware BAR, which may break drivers that compute things
6726 	 *    based on the resource size, e.g., to find registers at a
6727 	 *    fixed offset before the end of the BAR.
6728 	 *
6729 	 * 2) Retain the resource size, but use IORESOURCE_STARTALIGN and
6730 	 *    set r->start to the desired alignment.  By itself this
6731 	 *    doesn't prevent other BARs being put inside the alignment
6732 	 *    region, but if we realign *every* resource of every device in
6733 	 *    the system, none of them will share an alignment region.
6734 	 *
6735 	 * When the user has requested alignment for only some devices via
6736 	 * the "pci=resource_alignment" argument, "resize" is true and we
6737 	 * use the first method.  Otherwise we assume we're aligning all
6738 	 * devices and we use the second.
6739 	 */
6740 
6741 	pci_info(dev, "BAR%d %pR: requesting alignment to %#llx\n",
6742 		 bar, r, (unsigned long long)align);
6743 
6744 	if (resize) {
6745 		r->start = 0;
6746 		r->end = align - 1;
6747 	} else {
6748 		r->flags &= ~IORESOURCE_SIZEALIGN;
6749 		r->flags |= IORESOURCE_STARTALIGN;
6750 		r->start = align;
6751 		r->end = r->start + size - 1;
6752 	}
6753 	r->flags |= IORESOURCE_UNSET;
6754 }
6755 
6756 /*
6757  * This function disables memory decoding and releases memory resources
6758  * of the device specified by the kernel boot parameter 'pci=resource_alignment='.
6759  * It also rounds up the size to the specified alignment.
6760  * Later on, the kernel will assign page-aligned memory resources back
6761  * to the device.
6762  */
6763 void pci_reassigndev_resource_alignment(struct pci_dev *dev)
6764 {
6765 	int i;
6766 	struct resource *r;
6767 	resource_size_t align;
6768 	u16 command;
6769 	bool resize = false;
6770 
6771 	/*
6772 	 * VF BARs are read-only zero according to SR-IOV spec r1.1, sec
6773 	 * 3.4.1.11.  Their resources are allocated from the space
6774 	 * described by the VF BARx register in the PF's SR-IOV capability.
6775 	 * We can't influence their alignment here.
6776 	 */
6777 	if (dev->is_virtfn)
6778 		return;
6779 
6780 	/* check if specified PCI is target device to reassign */
6781 	align = pci_specified_resource_alignment(dev, &resize);
6782 	if (!align)
6783 		return;
6784 
6785 	if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
6786 	    (dev->class >> 8) == PCI_CLASS_BRIDGE_HOST) {
6787 		pci_warn(dev, "Can't reassign resources to host bridge\n");
6788 		return;
6789 	}
6790 
6791 	pci_read_config_word(dev, PCI_COMMAND, &command);
6792 	command &= ~PCI_COMMAND_MEMORY;
6793 	pci_write_config_word(dev, PCI_COMMAND, command);
6794 
6795 	for (i = 0; i <= PCI_ROM_RESOURCE; i++)
6796 		pci_request_resource_alignment(dev, i, align, resize);
6797 
6798 	/*
6799 	 * Need to disable the bridge's resource windows
6800 	 * so the kernel can reassign new resource
6801 	 * windows later on.
6802 	 */
6803 	if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
6804 		for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
6805 			r = &dev->resource[i];
6806 			if (!(r->flags & IORESOURCE_MEM))
6807 				continue;
6808 			r->flags |= IORESOURCE_UNSET;
6809 			r->end = resource_size(r) - 1;
6810 			r->start = 0;
6811 		}
6812 		pci_disable_bridge_window(dev);
6813 	}
6814 }
6815 
6816 static ssize_t resource_alignment_show(const struct bus_type *bus, char *buf)
6817 {
6818 	size_t count = 0;
6819 
6820 	spin_lock(&resource_alignment_lock);
6821 	if (resource_alignment_param)
6822 		count = sysfs_emit(buf, "%s\n", resource_alignment_param);
6823 	spin_unlock(&resource_alignment_lock);
6824 
6825 	return count;
6826 }
6827 
6828 static ssize_t resource_alignment_store(const struct bus_type *bus,
6829 					const char *buf, size_t count)
6830 {
6831 	char *param, *old, *end;
6832 
6833 	if (count >= (PAGE_SIZE - 1))
6834 		return -EINVAL;
6835 
6836 	param = kstrndup(buf, count, GFP_KERNEL);
6837 	if (!param)
6838 		return -ENOMEM;
6839 
6840 	end = strchr(param, '\n');
6841 	if (end)
6842 		*end = '\0';
6843 
6844 	spin_lock(&resource_alignment_lock);
6845 	old = resource_alignment_param;
6846 	if (strlen(param)) {
6847 		resource_alignment_param = param;
6848 	} else {
6849 		kfree(param);
6850 		resource_alignment_param = NULL;
6851 	}
6852 	spin_unlock(&resource_alignment_lock);
6853 
6854 	kfree(old);
6855 
6856 	return count;
6857 }
6858 
6859 static BUS_ATTR_RW(resource_alignment);
6860 
6861 static int __init pci_resource_alignment_sysfs_init(void)
6862 {
6863 	return bus_create_file(&pci_bus_type,
6864 					&bus_attr_resource_alignment);
6865 }
6866 late_initcall(pci_resource_alignment_sysfs_init);
6867 
6868 static void pci_no_domains(void)
6869 {
6870 #ifdef CONFIG_PCI_DOMAINS
6871 	pci_domains_supported = 0;
6872 #endif
6873 }
6874 
6875 #ifdef CONFIG_PCI_DOMAINS_GENERIC
6876 static DEFINE_IDA(pci_domain_nr_static_ida);
6877 static DEFINE_IDA(pci_domain_nr_dynamic_ida);
6878 
6879 static void of_pci_reserve_static_domain_nr(void)
6880 {
6881 	struct device_node *np;
6882 	int domain_nr;
6883 
6884 	for_each_node_by_type(np, "pci") {
6885 		domain_nr = of_get_pci_domain_nr(np);
6886 		if (domain_nr < 0)
6887 			continue;
6888 		/*
6889 		 * Permanently allocate domain_nr in dynamic_ida
6890 		 * to prevent it from being allocated dynamically.
6891 		 */
6892 		ida_alloc_range(&pci_domain_nr_dynamic_ida,
6893 				domain_nr, domain_nr, GFP_KERNEL);
6894 	}
6895 }
6896 
6897 static int of_pci_bus_find_domain_nr(struct device *parent)
6898 {
6899 	static bool static_domains_reserved;
6900 	int domain_nr;
6901 
6902 	/* On the first call scan device tree for static allocations. */
6903 	if (!static_domains_reserved) {
6904 		of_pci_reserve_static_domain_nr();
6905 		static_domains_reserved = true;
6906 	}
6907 
6908 	if (parent) {
6909 		/*
6910 		 * If the domain is in the DT, allocate it in the static IDA.
6911 		 * This prevents duplicate static allocations in case of
6912 		 * errors in the DT.
6913 		 */
6914 		domain_nr = of_get_pci_domain_nr(parent->of_node);
6915 		if (domain_nr >= 0)
6916 			return ida_alloc_range(&pci_domain_nr_static_ida,
6917 					       domain_nr, domain_nr,
6918 					       GFP_KERNEL);
6919 	}
6920 
6921 	/*
6922 	 * If the domain was not specified in the DT, choose a free ID from the
6923 	 * dynamic allocations.  All domain numbers from the DT are permanently
6924 	 * reserved in the dynamic IDA to prevent them from being assigned to
6925 	 * other DT nodes without a static domain.
6926 	 */
6927 	return ida_alloc(&pci_domain_nr_dynamic_ida, GFP_KERNEL);
6928 }
6929 
6930 static void of_pci_bus_release_domain_nr(struct pci_bus *bus, struct device *parent)
6931 {
6932 	if (bus->domain_nr < 0)
6933 		return;
6934 
6935 	/* Release domain from IDA where it was allocated. */
6936 	if (of_get_pci_domain_nr(parent->of_node) == bus->domain_nr)
6937 		ida_free(&pci_domain_nr_static_ida, bus->domain_nr);
6938 	else
6939 		ida_free(&pci_domain_nr_dynamic_ida, bus->domain_nr);
6940 }
6941 
6942 int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent)
6943 {
6944 	return acpi_disabled ? of_pci_bus_find_domain_nr(parent) :
6945 			       acpi_pci_bus_find_domain_nr(bus);
6946 }
6947 
6948 void pci_bus_release_domain_nr(struct pci_bus *bus, struct device *parent)
6949 {
6950 	if (!acpi_disabled)
6951 		return;
6952 	of_pci_bus_release_domain_nr(bus, parent);
6953 }
6954 #endif
6955 
6956 /**
6957  * pci_ext_cfg_avail - can we access extended PCI config space?
6958  *
6959  * Returns 1 if we can access PCI extended config space (offsets
6960  * greater than 0xff). This is the default implementation. Architecture
6961  * implementations can override this.
6962  */
6963 int __weak pci_ext_cfg_avail(void)
6964 {
6965 	return 1;
6966 }
6967 
6968 void __weak pci_fixup_cardbus(struct pci_bus *bus)
6969 {
6970 }
6971 EXPORT_SYMBOL(pci_fixup_cardbus);
6972 
6973 static int __init pci_setup(char *str)
6974 {
6975 	while (str) {
6976 		char *k = strchr(str, ',');
6977 		if (k)
6978 			*k++ = 0;
6979 		if (*str && (str = pcibios_setup(str)) && *str) {
6980 			if (!strcmp(str, "nomsi")) {
6981 				pci_no_msi();
6982 			} else if (!strncmp(str, "noats", 5)) {
6983 				pr_info("PCIe: ATS is disabled\n");
6984 				pcie_ats_disabled = true;
6985 			} else if (!strcmp(str, "noaer")) {
6986 				pci_no_aer();
6987 			} else if (!strcmp(str, "earlydump")) {
6988 				pci_early_dump = true;
6989 			} else if (!strncmp(str, "realloc=", 8)) {
6990 				pci_realloc_get_opt(str + 8);
6991 			} else if (!strncmp(str, "realloc", 7)) {
6992 				pci_realloc_get_opt("on");
6993 			} else if (!strcmp(str, "nodomains")) {
6994 				pci_no_domains();
6995 			} else if (!strncmp(str, "noari", 5)) {
6996 				pcie_ari_disabled = true;
6997 			} else if (!strncmp(str, "cbiosize=", 9)) {
6998 				pci_cardbus_io_size = memparse(str + 9, &str);
6999 			} else if (!strncmp(str, "cbmemsize=", 10)) {
7000 				pci_cardbus_mem_size = memparse(str + 10, &str);
7001 			} else if (!strncmp(str, "resource_alignment=", 19)) {
7002 				resource_alignment_param = str + 19;
7003 			} else if (!strncmp(str, "ecrc=", 5)) {
7004 				pcie_ecrc_get_policy(str + 5);
7005 			} else if (!strncmp(str, "hpiosize=", 9)) {
7006 				pci_hotplug_io_size = memparse(str + 9, &str);
7007 			} else if (!strncmp(str, "hpmmiosize=", 11)) {
7008 				pci_hotplug_mmio_size = memparse(str + 11, &str);
7009 			} else if (!strncmp(str, "hpmmioprefsize=", 15)) {
7010 				pci_hotplug_mmio_pref_size = memparse(str + 15, &str);
7011 			} else if (!strncmp(str, "hpmemsize=", 10)) {
7012 				pci_hotplug_mmio_size = memparse(str + 10, &str);
7013 				pci_hotplug_mmio_pref_size = pci_hotplug_mmio_size;
7014 			} else if (!strncmp(str, "hpbussize=", 10)) {
7015 				pci_hotplug_bus_size =
7016 					simple_strtoul(str + 10, &str, 0);
7017 				if (pci_hotplug_bus_size > 0xff)
7018 					pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE;
7019 			} else if (!strncmp(str, "pcie_bus_tune_off", 17)) {
7020 				pcie_bus_config = PCIE_BUS_TUNE_OFF;
7021 			} else if (!strncmp(str, "pcie_bus_safe", 13)) {
7022 				pcie_bus_config = PCIE_BUS_SAFE;
7023 			} else if (!strncmp(str, "pcie_bus_perf", 13)) {
7024 				pcie_bus_config = PCIE_BUS_PERFORMANCE;
7025 			} else if (!strncmp(str, "pcie_bus_peer2peer", 18)) {
7026 				pcie_bus_config = PCIE_BUS_PEER2PEER;
7027 			} else if (!strncmp(str, "pcie_scan_all", 13)) {
7028 				pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
7029 			} else if (!strncmp(str, "disable_acs_redir=", 18)) {
7030 				disable_acs_redir_param = str + 18;
7031 			} else {
7032 				pr_err("PCI: Unknown option `%s'\n", str);
7033 			}
7034 		}
7035 		str = k;
7036 	}
7037 	return 0;
7038 }
7039 early_param("pci", pci_setup);
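
/*
 * Example kernel command line combining several of the options parsed
 * above (the values are illustrative):
 *
 *	pci=nomsi,hpmemsize=128M,pcie_bus_safe,resource_alignment=12@00:01.0
 */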
7040 
7041 /*
7042  * 'resource_alignment_param' and 'disable_acs_redir_param' are initialized
7043  * in pci_setup(), above, to point to data in the __initdata section which
7044  * will be freed after the init sequence is complete. We can't allocate memory
7045  * in pci_setup() because some architectures do not have any memory allocation
7046  * service available during an early_param() call. So we allocate memory and
7047  * copy the variables here before the init section is freed.
7049  */
7050 static int __init pci_realloc_setup_params(void)
7051 {
7052 	resource_alignment_param = kstrdup(resource_alignment_param,
7053 					   GFP_KERNEL);
7054 	disable_acs_redir_param = kstrdup(disable_acs_redir_param, GFP_KERNEL);
7055 
7056 	return 0;
7057 }
7058 pure_initcall(pci_realloc_setup_params);
7059