xref: /linux/drivers/pci/pci.c (revision 43dfc13ca972988e620a6edb72956981b75ab6b0)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * PCI Bus Services, see include/linux/pci.h for further explanation.
4  *
5  * Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
6  * David Mosberger-Tang
7  *
8  * Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz>
9  */
10 
11 #include <linux/acpi.h>
12 #include <linux/kernel.h>
13 #include <linux/delay.h>
14 #include <linux/dmi.h>
15 #include <linux/init.h>
16 #include <linux/msi.h>
17 #include <linux/of.h>
18 #include <linux/pci.h>
19 #include <linux/pm.h>
20 #include <linux/slab.h>
21 #include <linux/module.h>
22 #include <linux/spinlock.h>
23 #include <linux/string.h>
24 #include <linux/log2.h>
25 #include <linux/logic_pio.h>
26 #include <linux/device.h>
27 #include <linux/pm_runtime.h>
28 #include <linux/pci_hotplug.h>
29 #include <linux/vmalloc.h>
30 #include <asm/dma.h>
31 #include <linux/aer.h>
32 #include <linux/bitfield.h>
33 #include "pci.h"
34 
35 DEFINE_MUTEX(pci_slot_mutex);
36 
37 const char *pci_power_names[] = {
38 	"error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown",
39 };
40 EXPORT_SYMBOL_GPL(pci_power_names);
41 
42 #ifdef CONFIG_X86_32
43 int isa_dma_bridge_buggy;
44 EXPORT_SYMBOL(isa_dma_bridge_buggy);
45 #endif
46 
47 int pci_pci_problems;
48 EXPORT_SYMBOL(pci_pci_problems);
49 
50 unsigned int pci_pm_d3hot_delay;
51 
52 static void pci_pme_list_scan(struct work_struct *work);
53 
54 static LIST_HEAD(pci_pme_list);
55 static DEFINE_MUTEX(pci_pme_list_mutex);
56 static DECLARE_DELAYED_WORK(pci_pme_work, pci_pme_list_scan);
57 
58 struct pci_pme_device {
59 	struct list_head list;
60 	struct pci_dev *dev;
61 };
62 
63 #define PME_TIMEOUT 1000 /* How long between PME checks */
64 
65 /*
66  * Following exit from Conventional Reset, devices must be ready within 1 sec
67  * (PCIe r6.0 sec 6.6.1).  A D3cold to D0 transition implies a Conventional
68  * Reset (PCIe r6.0 sec 5.8).
69  */
70 #define PCI_RESET_WAIT 1000 /* msec */
71 
72 /*
73  * Devices may extend the 1 sec period through Request Retry Status
74  * completions (PCIe r6.0 sec 2.3.1).  The spec does not provide an upper
75  * limit, but 60 sec ought to be enough for any device to become
76  * responsive.
77  */
78 #define PCIE_RESET_READY_POLL_MS 60000 /* msec */
79 
80 static void pci_dev_d3_sleep(struct pci_dev *dev)
81 {
82 	unsigned int delay_ms = max(dev->d3hot_delay, pci_pm_d3hot_delay);
83 	unsigned int upper;
84 
85 	if (delay_ms) {
86 		/* Use a 20% upper bound, 1ms minimum */
87 		upper = max(DIV_ROUND_CLOSEST(delay_ms, 5), 1U);
88 		usleep_range(delay_ms * USEC_PER_MSEC,
89 			     (delay_ms + upper) * USEC_PER_MSEC);
90 	}
91 }
92 
93 bool pci_reset_supported(struct pci_dev *dev)
94 {
95 	return dev->reset_methods[0] != 0;
96 }
97 
98 #ifdef CONFIG_PCI_DOMAINS
99 int pci_domains_supported = 1;
100 #endif
101 
102 #define DEFAULT_CARDBUS_IO_SIZE		(256)
103 #define DEFAULT_CARDBUS_MEM_SIZE	(64*1024*1024)
104 /* pci=cbmemsize=nnM,cbiosize=nn can override this */
105 unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE;
106 unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;
107 
108 #define DEFAULT_HOTPLUG_IO_SIZE		(256)
109 #define DEFAULT_HOTPLUG_MMIO_SIZE	(2*1024*1024)
110 #define DEFAULT_HOTPLUG_MMIO_PREF_SIZE	(2*1024*1024)
111 /* hpiosize=nn can override this */
112 unsigned long pci_hotplug_io_size  = DEFAULT_HOTPLUG_IO_SIZE;
113 /*
114  * pci=hpmmiosize=nnM overrides non-prefetchable MMIO size,
115  * pci=hpmmioprefsize=nnM overrides prefetchable MMIO size;
116  * pci=hpmemsize=nnM overrides both
117  */
118 unsigned long pci_hotplug_mmio_size = DEFAULT_HOTPLUG_MMIO_SIZE;
119 unsigned long pci_hotplug_mmio_pref_size = DEFAULT_HOTPLUG_MMIO_PREF_SIZE;
120 
121 #define DEFAULT_HOTPLUG_BUS_SIZE	1
122 unsigned long pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE;
123 
124 
125 /* PCIe MPS/MRRS strategy; can be overridden by kernel command-line param */
126 #ifdef CONFIG_PCIE_BUS_TUNE_OFF
127 enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_TUNE_OFF;
128 #elif defined CONFIG_PCIE_BUS_SAFE
129 enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_SAFE;
130 #elif defined CONFIG_PCIE_BUS_PERFORMANCE
131 enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_PERFORMANCE;
132 #elif defined CONFIG_PCIE_BUS_PEER2PEER
133 enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_PEER2PEER;
134 #else
135 enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_DEFAULT;
136 #endif
137 
138 /*
139  * The default CLS is used if arch didn't set CLS explicitly and not
140  * all pci devices agree on the same value.  Arch can override either
141  * the dfl or actual value as it sees fit.  Don't forget this is
142  * measured in 32-bit words, not bytes.
143  */
144 u8 pci_dfl_cache_line_size __ro_after_init = L1_CACHE_BYTES >> 2;
145 u8 pci_cache_line_size __ro_after_init ;
146 
147 /*
148  * If we set up a device for bus mastering, we need to check the latency
149  * timer as certain BIOSes forget to set it properly.
150  */
151 unsigned int pcibios_max_latency = 255;
152 
153 /* If set, the PCIe ARI capability will not be used. */
154 static bool pcie_ari_disabled;
155 
156 /* If set, the PCIe ATS capability will not be used. */
157 static bool pcie_ats_disabled;
158 
159 /* If set, the PCI config space of each device is printed during boot. */
160 bool pci_early_dump;
161 
162 bool pci_ats_disabled(void)
163 {
164 	return pcie_ats_disabled;
165 }
166 EXPORT_SYMBOL_GPL(pci_ats_disabled);
167 
168 /* Disable bridge_d3 for all PCIe ports */
169 static bool pci_bridge_d3_disable;
170 /* Force bridge_d3 for all PCIe ports */
171 static bool pci_bridge_d3_force;
172 
173 static int __init pcie_port_pm_setup(char *str)
174 {
175 	if (!strcmp(str, "off"))
176 		pci_bridge_d3_disable = true;
177 	else if (!strcmp(str, "force"))
178 		pci_bridge_d3_force = true;
179 	return 1;
180 }
181 __setup("pcie_port_pm=", pcie_port_pm_setup);
182 
183 /**
184  * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
185  * @bus: pointer to PCI bus structure to search
186  *
187  * Given a PCI bus, returns the highest PCI bus number present in the set
188  * including the given PCI bus and its list of child PCI buses.
189  */
190 unsigned char pci_bus_max_busnr(struct pci_bus *bus)
191 {
192 	struct pci_bus *tmp;
193 	unsigned char max, n;
194 
195 	max = bus->busn_res.end;
196 	list_for_each_entry(tmp, &bus->children, node) {
197 		n = pci_bus_max_busnr(tmp);
198 		if (n > max)
199 			max = n;
200 	}
201 	return max;
202 }
203 EXPORT_SYMBOL_GPL(pci_bus_max_busnr);
204 
205 /**
206  * pci_status_get_and_clear_errors - return and clear error bits in PCI_STATUS
207  * @pdev: the PCI device
208  *
209  * Returns error bits set in PCI_STATUS and clears them.
210  */
211 int pci_status_get_and_clear_errors(struct pci_dev *pdev)
212 {
213 	u16 status;
214 	int ret;
215 
216 	ret = pci_read_config_word(pdev, PCI_STATUS, &status);
217 	if (ret != PCIBIOS_SUCCESSFUL)
218 		return -EIO;
219 
220 	status &= PCI_STATUS_ERROR_BITS;
221 	if (status)
222 		pci_write_config_word(pdev, PCI_STATUS, status);
223 
224 	return status;
225 }
226 EXPORT_SYMBOL_GPL(pci_status_get_and_clear_errors);
227 
228 #ifdef CONFIG_HAS_IOMEM
229 static void __iomem *__pci_ioremap_resource(struct pci_dev *pdev, int bar,
230 					    bool write_combine)
231 {
232 	struct resource *res = &pdev->resource[bar];
233 	resource_size_t start = res->start;
234 	resource_size_t size = resource_size(res);
235 
236 	/*
237 	 * Make sure the BAR is actually a memory resource, not an IO resource
238 	 */
239 	if (res->flags & IORESOURCE_UNSET || !(res->flags & IORESOURCE_MEM)) {
240 		pci_err(pdev, "can't ioremap BAR %d: %pR\n", bar, res);
241 		return NULL;
242 	}
243 
244 	if (write_combine)
245 		return ioremap_wc(start, size);
246 
247 	return ioremap(start, size);
248 }
249 
250 void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
251 {
252 	return __pci_ioremap_resource(pdev, bar, false);
253 }
254 EXPORT_SYMBOL_GPL(pci_ioremap_bar);
255 
256 void __iomem *pci_ioremap_wc_bar(struct pci_dev *pdev, int bar)
257 {
258 	return __pci_ioremap_resource(pdev, bar, true);
259 }
260 EXPORT_SYMBOL_GPL(pci_ioremap_wc_bar);
261 #endif
262 
263 /**
264  * pci_dev_str_match_path - test if a path string matches a device
265  * @dev: the PCI device to test
266  * @path: string to match the device against
267  * @endptr: pointer to the string after the match
268  *
269  * Test if a string (typically from a kernel parameter) formatted as a
270  * path of device/function addresses matches a PCI device. The string must
271  * be of the form:
272  *
273  *   [<domain>:]<bus>:<device>.<func>[/<device>.<func>]*
274  *
275  * A path for a device can be obtained using 'lspci -t'.  Using a path
276  * is more robust against bus renumbering than using only a single bus,
277  * device and function address.
278  *
279  * Returns 1 if the string matches the device, 0 if it does not and
280  * a negative error code if it fails to parse the string.
281  */
282 static int pci_dev_str_match_path(struct pci_dev *dev, const char *path,
283 				  const char **endptr)
284 {
285 	int ret;
286 	unsigned int seg, bus, slot, func;
287 	char *wpath, *p;
288 	char end;
289 
290 	*endptr = strchrnul(path, ';');
291 
292 	wpath = kmemdup_nul(path, *endptr - path, GFP_ATOMIC);
293 	if (!wpath)
294 		return -ENOMEM;
295 
296 	while (1) {
297 		p = strrchr(wpath, '/');
298 		if (!p)
299 			break;
300 		ret = sscanf(p, "/%x.%x%c", &slot, &func, &end);
301 		if (ret != 2) {
302 			ret = -EINVAL;
303 			goto free_and_exit;
304 		}
305 
306 		if (dev->devfn != PCI_DEVFN(slot, func)) {
307 			ret = 0;
308 			goto free_and_exit;
309 		}
310 
311 		/*
312 		 * Note: we don't need to get a reference to the upstream
313 		 * bridge because we hold a reference to the top level
314 		 * device which should hold a reference to the bridge,
315 		 * and so on.
316 		 */
317 		dev = pci_upstream_bridge(dev);
318 		if (!dev) {
319 			ret = 0;
320 			goto free_and_exit;
321 		}
322 
323 		*p = 0;
324 	}
325 
326 	ret = sscanf(wpath, "%x:%x:%x.%x%c", &seg, &bus, &slot,
327 		     &func, &end);
328 	if (ret != 4) {
329 		seg = 0;
330 		ret = sscanf(wpath, "%x:%x.%x%c", &bus, &slot, &func, &end);
331 		if (ret != 3) {
332 			ret = -EINVAL;
333 			goto free_and_exit;
334 		}
335 	}
336 
337 	ret = (seg == pci_domain_nr(dev->bus) &&
338 	       bus == dev->bus->number &&
339 	       dev->devfn == PCI_DEVFN(slot, func));
340 
341 free_and_exit:
342 	kfree(wpath);
343 	return ret;
344 }
345 
346 /**
347  * pci_dev_str_match - test if a string matches a device
348  * @dev: the PCI device to test
349  * @p: string to match the device against
350  * @endptr: pointer to the string after the match
351  *
352  * Test if a string (typically from a kernel parameter) matches a specified
353  * PCI device. The string may be of one of the following formats:
354  *
355  *   [<domain>:]<bus>:<device>.<func>[/<device>.<func>]*
356  *   pci:<vendor>:<device>[:<subvendor>:<subdevice>]
357  *
358  * The first format specifies a PCI bus/device/function address which
359  * may change if new hardware is inserted, if motherboard firmware changes,
360  * or due to changes caused in kernel parameters. If the domain is
361  * left unspecified, it is taken to be 0.  In order to be robust against
362  * bus renumbering issues, a path of PCI device/function numbers may be used
363  * to address the specific device.  The path for a device can be determined
364  * through the use of 'lspci -t'.
365  *
366  * The second format matches devices using IDs in the configuration
367  * space which may match multiple devices in the system. A value of 0
368  * for any field will match all devices. (Note: this differs from
369  * in-kernel code that uses PCI_ANY_ID which is ~0; this is for
370  * legacy reasons and convenience so users don't have to specify
371  * FFFFFFFFs on the command line.)
372  *
373  * Returns 1 if the string matches the device, 0 if it does not and
374  * a negative error code if the string cannot be parsed.
375  */
376 static int pci_dev_str_match(struct pci_dev *dev, const char *p,
377 			     const char **endptr)
378 {
379 	int ret;
380 	int count;
381 	unsigned short vendor, device, subsystem_vendor, subsystem_device;
382 
383 	if (strncmp(p, "pci:", 4) == 0) {
384 		/* PCI vendor/device (subvendor/subdevice) IDs are specified */
385 		p += 4;
386 		ret = sscanf(p, "%hx:%hx:%hx:%hx%n", &vendor, &device,
387 			     &subsystem_vendor, &subsystem_device, &count);
388 		if (ret != 4) {
389 			ret = sscanf(p, "%hx:%hx%n", &vendor, &device, &count);
390 			if (ret != 2)
391 				return -EINVAL;
392 
393 			subsystem_vendor = 0;
394 			subsystem_device = 0;
395 		}
396 
397 		p += count;
398 
399 		if ((!vendor || vendor == dev->vendor) &&
400 		    (!device || device == dev->device) &&
401 		    (!subsystem_vendor ||
402 			    subsystem_vendor == dev->subsystem_vendor) &&
403 		    (!subsystem_device ||
404 			    subsystem_device == dev->subsystem_device))
405 			goto found;
406 	} else {
407 		/*
408 		 * PCI Bus, Device, Function IDs are specified
409 		 * (optionally, may include a path of devfns following it)
410 		 */
411 		ret = pci_dev_str_match_path(dev, p, &p);
412 		if (ret < 0)
413 			return ret;
414 		else if (ret)
415 			goto found;
416 	}
417 
418 	*endptr = p;
419 	return 0;
420 
421 found:
422 	*endptr = p;
423 	return 1;
424 }
425 
426 static u8 __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn,
427 			      u8 pos, int cap)
428 {
429 	return PCI_FIND_NEXT_CAP(pci_bus_read_config, pos, cap, bus, devfn);
430 }
431 
432 u8 pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
433 {
434 	return __pci_find_next_cap(dev->bus, dev->devfn,
435 				   pos + PCI_CAP_LIST_NEXT, cap);
436 }
437 EXPORT_SYMBOL_GPL(pci_find_next_capability);
438 
439 static u8 __pci_bus_find_cap_start(struct pci_bus *bus,
440 				    unsigned int devfn, u8 hdr_type)
441 {
442 	u16 status;
443 
444 	pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
445 	if (!(status & PCI_STATUS_CAP_LIST))
446 		return 0;
447 
448 	switch (hdr_type) {
449 	case PCI_HEADER_TYPE_NORMAL:
450 	case PCI_HEADER_TYPE_BRIDGE:
451 		return PCI_CAPABILITY_LIST;
452 	case PCI_HEADER_TYPE_CARDBUS:
453 		return PCI_CB_CAPABILITY_LIST;
454 	}
455 
456 	return 0;
457 }
458 
459 /**
460  * pci_find_capability - query for devices' capabilities
461  * @dev: PCI device to query
462  * @cap: capability code
463  *
464  * Tell if a device supports a given PCI capability.
465  * Returns the address of the requested capability structure within the
466  * device's PCI configuration space or 0 in case the device does not
467  * support it.  Possible values for @cap include:
468  *
469  *  %PCI_CAP_ID_PM           Power Management
470  *  %PCI_CAP_ID_AGP          Accelerated Graphics Port
471  *  %PCI_CAP_ID_VPD          Vital Product Data
472  *  %PCI_CAP_ID_SLOTID       Slot Identification
473  *  %PCI_CAP_ID_MSI          Message Signalled Interrupts
474  *  %PCI_CAP_ID_CHSWP        CompactPCI HotSwap
475  *  %PCI_CAP_ID_PCIX         PCI-X
476  *  %PCI_CAP_ID_EXP          PCI Express
477  */
478 u8 pci_find_capability(struct pci_dev *dev, int cap)
479 {
480 	u8 pos;
481 
482 	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
483 	if (pos)
484 		pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap);
485 
486 	return pos;
487 }
488 EXPORT_SYMBOL(pci_find_capability);
489 
490 /**
491  * pci_bus_find_capability - query for devices' capabilities
492  * @bus: the PCI bus to query
493  * @devfn: PCI device to query
494  * @cap: capability code
495  *
496  * Like pci_find_capability() but works for PCI devices that do not have a
497  * pci_dev structure set up yet.
498  *
499  * Returns the address of the requested capability structure within the
500  * device's PCI configuration space or 0 in case the device does not
501  * support it.
502  */
503 u8 pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
504 {
505 	u8 hdr_type, pos;
506 
507 	pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);
508 
509 	pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & PCI_HEADER_TYPE_MASK);
510 	if (pos)
511 		pos = __pci_find_next_cap(bus, devfn, pos, cap);
512 
513 	return pos;
514 }
515 EXPORT_SYMBOL(pci_bus_find_capability);
516 
517 /**
518  * pci_find_next_ext_capability - Find an extended capability
519  * @dev: PCI device to query
520  * @start: address at which to start looking (0 to start at beginning of list)
521  * @cap: capability code
522  *
523  * Returns the address of the next matching extended capability structure
524  * within the device's PCI configuration space or 0 if the device does
525  * not support it.  Some capabilities can occur several times, e.g., the
526  * vendor-specific capability, and this provides a way to find them all.
527  */
528 u16 pci_find_next_ext_capability(struct pci_dev *dev, u16 start, int cap)
529 {
530 	if (dev->cfg_size <= PCI_CFG_SPACE_SIZE)
531 		return 0;
532 
533 	return PCI_FIND_NEXT_EXT_CAP(pci_bus_read_config, start, cap,
534 				     dev->bus, dev->devfn);
535 }
536 EXPORT_SYMBOL_GPL(pci_find_next_ext_capability);
537 
538 /**
539  * pci_find_ext_capability - Find an extended capability
540  * @dev: PCI device to query
541  * @cap: capability code
542  *
543  * Returns the address of the requested extended capability structure
544  * within the device's PCI configuration space or 0 if the device does
545  * not support it.  Possible values for @cap include:
546  *
547  *  %PCI_EXT_CAP_ID_ERR		Advanced Error Reporting
548  *  %PCI_EXT_CAP_ID_VC		Virtual Channel
549  *  %PCI_EXT_CAP_ID_DSN		Device Serial Number
550  *  %PCI_EXT_CAP_ID_PWR		Power Budgeting
551  */
552 u16 pci_find_ext_capability(struct pci_dev *dev, int cap)
553 {
554 	return pci_find_next_ext_capability(dev, 0, cap);
555 }
556 EXPORT_SYMBOL_GPL(pci_find_ext_capability);
557 
558 /**
559  * pci_get_dsn - Read and return the 8-byte Device Serial Number
560  * @dev: PCI device to query
561  *
562  * Looks up the PCI_EXT_CAP_ID_DSN and reads the 8 bytes of the Device Serial
563  * Number.
564  *
565  * Returns the DSN, or zero if the capability does not exist.
566  */
567 u64 pci_get_dsn(struct pci_dev *dev)
568 {
569 	u32 dword;
570 	u64 dsn;
571 	int pos;
572 
573 	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DSN);
574 	if (!pos)
575 		return 0;
576 
577 	/*
578 	 * The Device Serial Number is two dwords offset 4 bytes from the
579 	 * capability position. The specification says that the first dword is
580 	 * the lower half, and the second dword is the upper half.
581 	 */
582 	pos += 4;
583 	pci_read_config_dword(dev, pos, &dword);
584 	dsn = (u64)dword;
585 	pci_read_config_dword(dev, pos + 4, &dword);
586 	dsn |= ((u64)dword) << 32;
587 
588 	return dsn;
589 }
590 EXPORT_SYMBOL_GPL(pci_get_dsn);
591 
592 static u8 __pci_find_next_ht_cap(struct pci_dev *dev, u8 pos, int ht_cap)
593 {
594 	int rc;
595 	u8 cap, mask;
596 
597 	if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST)
598 		mask = HT_3BIT_CAP_MASK;
599 	else
600 		mask = HT_5BIT_CAP_MASK;
601 
602 	pos = PCI_FIND_NEXT_CAP(pci_bus_read_config, pos,
603 				PCI_CAP_ID_HT, dev->bus, dev->devfn);
604 	while (pos) {
605 		rc = pci_read_config_byte(dev, pos + 3, &cap);
606 		if (rc != PCIBIOS_SUCCESSFUL)
607 			return 0;
608 
609 		if ((cap & mask) == ht_cap)
610 			return pos;
611 
612 		pos = PCI_FIND_NEXT_CAP(pci_bus_read_config,
613 					pos + PCI_CAP_LIST_NEXT,
614 					PCI_CAP_ID_HT, dev->bus,
615 					dev->devfn);
616 	}
617 
618 	return 0;
619 }
620 
621 /**
622  * pci_find_next_ht_capability - query a device's HyperTransport capabilities
623  * @dev: PCI device to query
624  * @pos: Position from which to continue searching
625  * @ht_cap: HyperTransport capability code
626  *
627  * To be used in conjunction with pci_find_ht_capability() to search for
628  * all capabilities matching @ht_cap. @pos should always be a value returned
629  * from pci_find_ht_capability().
630  *
631  * NB. To be 100% safe against broken PCI devices, the caller should take
632  * steps to avoid an infinite loop.
633  */
634 u8 pci_find_next_ht_capability(struct pci_dev *dev, u8 pos, int ht_cap)
635 {
636 	return __pci_find_next_ht_cap(dev, pos + PCI_CAP_LIST_NEXT, ht_cap);
637 }
638 EXPORT_SYMBOL_GPL(pci_find_next_ht_capability);
639 
640 /**
641  * pci_find_ht_capability - query a device's HyperTransport capabilities
642  * @dev: PCI device to query
643  * @ht_cap: HyperTransport capability code
644  *
645  * Tell if a device supports a given HyperTransport capability.
646  * Returns an address within the device's PCI configuration space
647  * or 0 in case the device does not support the request capability.
648  * The address points to the PCI capability, of type PCI_CAP_ID_HT,
649  * which has a HyperTransport capability matching @ht_cap.
650  */
651 u8 pci_find_ht_capability(struct pci_dev *dev, int ht_cap)
652 {
653 	u8 pos;
654 
655 	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
656 	if (pos)
657 		pos = __pci_find_next_ht_cap(dev, pos, ht_cap);
658 
659 	return pos;
660 }
661 EXPORT_SYMBOL_GPL(pci_find_ht_capability);
662 
663 /**
664  * pci_find_vsec_capability - Find a vendor-specific extended capability
665  * @dev: PCI device to query
666  * @vendor: Vendor ID for which capability is defined
667  * @cap: Vendor-specific capability ID
668  *
669  * If @dev has Vendor ID @vendor, search for a VSEC capability with
670  * VSEC ID @cap. If found, return the capability offset in
671  * config space; otherwise return 0.
672  */
673 u16 pci_find_vsec_capability(struct pci_dev *dev, u16 vendor, int cap)
674 {
675 	u16 vsec = 0;
676 	u32 header;
677 	int ret;
678 
679 	if (vendor != dev->vendor)
680 		return 0;
681 
682 	while ((vsec = pci_find_next_ext_capability(dev, vsec,
683 						     PCI_EXT_CAP_ID_VNDR))) {
684 		ret = pci_read_config_dword(dev, vsec + PCI_VNDR_HEADER, &header);
685 		if (ret != PCIBIOS_SUCCESSFUL)
686 			continue;
687 
688 		if (PCI_VNDR_HEADER_ID(header) == cap)
689 			return vsec;
690 	}
691 
692 	return 0;
693 }
694 EXPORT_SYMBOL_GPL(pci_find_vsec_capability);
695 
696 /**
697  * pci_find_dvsec_capability - Find DVSEC for vendor
698  * @dev: PCI device to query
699  * @vendor: Vendor ID to match for the DVSEC
700  * @dvsec: Designated Vendor-specific capability ID
701  *
702  * If DVSEC has Vendor ID @vendor and DVSEC ID @dvsec return the capability
703  * offset in config space; otherwise return 0.
704  */
705 u16 pci_find_dvsec_capability(struct pci_dev *dev, u16 vendor, u16 dvsec)
706 {
707 	int pos;
708 
709 	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DVSEC);
710 	if (!pos)
711 		return 0;
712 
713 	while (pos) {
714 		u16 v, id;
715 
716 		pci_read_config_word(dev, pos + PCI_DVSEC_HEADER1, &v);
717 		pci_read_config_word(dev, pos + PCI_DVSEC_HEADER2, &id);
718 		if (vendor == v && dvsec == id)
719 			return pos;
720 
721 		pos = pci_find_next_ext_capability(dev, pos, PCI_EXT_CAP_ID_DVSEC);
722 	}
723 
724 	return 0;
725 }
726 EXPORT_SYMBOL_GPL(pci_find_dvsec_capability);
727 
728 /**
729  * pci_find_parent_resource - return resource region of parent bus of given
730  *			      region
731  * @dev: PCI device structure contains resources to be searched
732  * @res: child resource record for which parent is sought
733  *
734  * For given resource region of given device, return the resource region of
735  * parent bus the given region is contained in.
736  */
737 struct resource *pci_find_parent_resource(const struct pci_dev *dev,
738 					  struct resource *res)
739 {
740 	const struct pci_bus *bus = dev->bus;
741 	struct resource *r;
742 
743 	pci_bus_for_each_resource(bus, r) {
744 		if (!r)
745 			continue;
746 		if (resource_contains(r, res)) {
747 
748 			/*
749 			 * If the window is prefetchable but the BAR is
750 			 * not, the allocator made a mistake.
751 			 */
752 			if (r->flags & IORESOURCE_PREFETCH &&
753 			    !(res->flags & IORESOURCE_PREFETCH))
754 				return NULL;
755 
756 			/*
757 			 * If we're below a transparent bridge, there may
758 			 * be both a positively-decoded aperture and a
759 			 * subtractively-decoded region that contain the BAR.
760 			 * We want the positively-decoded one, so this depends
761 			 * on pci_bus_for_each_resource() giving us those
762 			 * first.
763 			 */
764 			return r;
765 		}
766 	}
767 	return NULL;
768 }
769 EXPORT_SYMBOL(pci_find_parent_resource);
770 
771 /**
772  * pci_find_resource - Return matching PCI device resource
773  * @dev: PCI device to query
774  * @res: Resource to look for
775  *
776  * Goes over standard PCI resources (BARs) and checks if the given resource
777  * is partially or fully contained in any of them. In that case the
778  * matching resource is returned, %NULL otherwise.
779  */
780 struct resource *pci_find_resource(struct pci_dev *dev, struct resource *res)
781 {
782 	int i;
783 
784 	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
785 		struct resource *r = &dev->resource[i];
786 
787 		if (r->start && resource_contains(r, res))
788 			return r;
789 	}
790 
791 	return NULL;
792 }
793 EXPORT_SYMBOL(pci_find_resource);
794 
795 /**
796  * pci_resource_name - Return the name of the PCI resource
797  * @dev: PCI device to query
798  * @i: index of the resource
799  *
800  * Return the standard PCI resource (BAR) name according to their index.
801  */
802 const char *pci_resource_name(struct pci_dev *dev, unsigned int i)
803 {
804 	static const char * const bar_name[] = {
805 		"BAR 0",
806 		"BAR 1",
807 		"BAR 2",
808 		"BAR 3",
809 		"BAR 4",
810 		"BAR 5",
811 		"ROM",
812 #ifdef CONFIG_PCI_IOV
813 		"VF BAR 0",
814 		"VF BAR 1",
815 		"VF BAR 2",
816 		"VF BAR 3",
817 		"VF BAR 4",
818 		"VF BAR 5",
819 #endif
820 		"bridge window",	/* "io" included in %pR */
821 		"bridge window",	/* "mem" included in %pR */
822 		"bridge window",	/* "mem pref" included in %pR */
823 	};
824 	static const char * const cardbus_name[] = {
825 		"BAR 1",
826 		"unknown",
827 		"unknown",
828 		"unknown",
829 		"unknown",
830 		"unknown",
831 #ifdef CONFIG_PCI_IOV
832 		"unknown",
833 		"unknown",
834 		"unknown",
835 		"unknown",
836 		"unknown",
837 		"unknown",
838 #endif
839 		"CardBus bridge window 0",	/* I/O */
840 		"CardBus bridge window 1",	/* I/O */
841 		"CardBus bridge window 0",	/* mem */
842 		"CardBus bridge window 1",	/* mem */
843 	};
844 
845 	if (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS &&
846 	    i < ARRAY_SIZE(cardbus_name))
847 		return cardbus_name[i];
848 
849 	if (i < ARRAY_SIZE(bar_name))
850 		return bar_name[i];
851 
852 	return "unknown";
853 }
854 
855 /**
856  * pci_wait_for_pending - wait for @mask bit(s) to clear in status word @pos
857  * @dev: the PCI device to operate on
858  * @pos: config space offset of status word
859  * @mask: mask of bit(s) to care about in status word
860  *
861  * Return 1 when mask bit(s) in status word clear, 0 otherwise.
862  */
863 int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask)
864 {
865 	int i;
866 
867 	/* Wait for Transaction Pending bit clean */
868 	for (i = 0; i < 4; i++) {
869 		u16 status;
870 		if (i)
871 			msleep((1 << (i - 1)) * 100);
872 
873 		pci_read_config_word(dev, pos, &status);
874 		if (!(status & mask))
875 			return 1;
876 	}
877 
878 	return 0;
879 }
880 
881 static int pci_acs_enable;
882 
883 /**
884  * pci_request_acs - ask for ACS to be enabled if supported
885  */
886 void pci_request_acs(void)
887 {
888 	pci_acs_enable = 1;
889 }
890 
891 static const char *disable_acs_redir_param;
892 static const char *config_acs_param;
893 
894 struct pci_acs {
895 	u16 cap;
896 	u16 ctrl;
897 	u16 fw_ctrl;
898 };
899 
900 static void __pci_config_acs(struct pci_dev *dev, struct pci_acs *caps,
901 			     const char *p, const u16 acs_mask, const u16 acs_flags)
902 {
903 	u16 flags = acs_flags;
904 	u16 mask = acs_mask;
905 	char *delimit;
906 	int ret = 0;
907 
908 	if (!p)
909 		return;
910 
911 	while (*p) {
912 		if (!acs_mask) {
913 			/* Check for ACS flags */
914 			delimit = strstr(p, "@");
915 			if (delimit) {
916 				int end;
917 				u32 shift = 0;
918 
919 				end = delimit - p - 1;
920 				mask = 0;
921 				flags = 0;
922 
923 				while (end > -1) {
924 					if (*(p + end) == '0') {
925 						mask |= 1 << shift;
926 						shift++;
927 						end--;
928 					} else if (*(p + end) == '1') {
929 						mask |= 1 << shift;
930 						flags |= 1 << shift;
931 						shift++;
932 						end--;
933 					} else if ((*(p + end) == 'x') || (*(p + end) == 'X')) {
934 						shift++;
935 						end--;
936 					} else {
937 						pci_err(dev, "Invalid ACS flags... Ignoring\n");
938 						return;
939 					}
940 				}
941 				p = delimit + 1;
942 			} else {
943 				pci_err(dev, "ACS Flags missing\n");
944 				return;
945 			}
946 		}
947 
948 		if (mask & ~(PCI_ACS_SV | PCI_ACS_TB | PCI_ACS_RR | PCI_ACS_CR |
949 			    PCI_ACS_UF | PCI_ACS_EC | PCI_ACS_DT)) {
950 			pci_err(dev, "Invalid ACS flags specified\n");
951 			return;
952 		}
953 
954 		ret = pci_dev_str_match(dev, p, &p);
955 		if (ret < 0) {
956 			pr_info_once("PCI: Can't parse ACS command line parameter\n");
957 			break;
958 		} else if (ret == 1) {
959 			/* Found a match */
960 			break;
961 		}
962 
963 		if (*p != ';' && *p != ',') {
964 			/* End of param or invalid format */
965 			break;
966 		}
967 		p++;
968 	}
969 
970 	if (ret != 1)
971 		return;
972 
973 	if (!pci_dev_specific_disable_acs_redir(dev))
974 		return;
975 
976 	pci_dbg(dev, "ACS mask  = %#06x\n", mask);
977 	pci_dbg(dev, "ACS flags = %#06x\n", flags);
978 	pci_dbg(dev, "ACS control = %#06x\n", caps->ctrl);
979 	pci_dbg(dev, "ACS fw_ctrl = %#06x\n", caps->fw_ctrl);
980 
981 	/*
982 	 * For mask bits that are 0, copy them from the firmware setting
983 	 * and apply flags for all the mask bits that are 1.
984 	 */
985 	caps->ctrl = (caps->fw_ctrl & ~mask) | (flags & mask);
986 
987 	pci_info(dev, "Configured ACS to %#06x\n", caps->ctrl);
988 }
989 
990 /**
991  * pci_std_enable_acs - enable ACS on devices using standard ACS capabilities
992  * @dev: the PCI device
993  * @caps: default ACS controls
994  */
995 static void pci_std_enable_acs(struct pci_dev *dev, struct pci_acs *caps)
996 {
997 	/* Source Validation */
998 	caps->ctrl |= (caps->cap & PCI_ACS_SV);
999 
1000 	/* P2P Request Redirect */
1001 	caps->ctrl |= (caps->cap & PCI_ACS_RR);
1002 
1003 	/* P2P Completion Redirect */
1004 	caps->ctrl |= (caps->cap & PCI_ACS_CR);
1005 
1006 	/* Upstream Forwarding */
1007 	caps->ctrl |= (caps->cap & PCI_ACS_UF);
1008 
1009 	/* Enable Translation Blocking for external devices and noats */
1010 	if (pci_ats_disabled() || dev->external_facing || dev->untrusted)
1011 		caps->ctrl |= (caps->cap & PCI_ACS_TB);
1012 }
1013 
1014 /**
1015  * pci_enable_acs - enable ACS if hardware support it
1016  * @dev: the PCI device
1017  */
1018 static void pci_enable_acs(struct pci_dev *dev)
1019 {
1020 	struct pci_acs caps;
1021 	bool enable_acs = false;
1022 	int pos;
1023 
1024 	/* If an iommu is present we start with kernel default caps */
1025 	if (pci_acs_enable) {
1026 		if (pci_dev_specific_enable_acs(dev))
1027 			enable_acs = true;
1028 	}
1029 
1030 	pos = dev->acs_cap;
1031 	if (!pos)
1032 		return;
1033 
1034 	pci_read_config_word(dev, pos + PCI_ACS_CAP, &caps.cap);
1035 	pci_read_config_word(dev, pos + PCI_ACS_CTRL, &caps.ctrl);
1036 	caps.fw_ctrl = caps.ctrl;
1037 
1038 	if (enable_acs)
1039 		pci_std_enable_acs(dev, &caps);
1040 
1041 	/*
1042 	 * Always apply caps from the command line, even if there is no iommu.
1043 	 * Trust that the admin has a reason to change the ACS settings.
1044 	 */
1045 	__pci_config_acs(dev, &caps, disable_acs_redir_param,
1046 			 PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC,
1047 			 ~(PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC));
1048 	__pci_config_acs(dev, &caps, config_acs_param, 0, 0);
1049 
1050 	pci_write_config_word(dev, pos + PCI_ACS_CTRL, caps.ctrl);
1051 }
1052 
1053 /**
1054  * pci_restore_bars - restore a device's BAR values (e.g. after wake-up)
1055  * @dev: PCI device to have its BARs restored
1056  *
1057  * Restore the BAR values for a given device, so as to make it
1058  * accessible by its driver.
1059  */
1060 static void pci_restore_bars(struct pci_dev *dev)
1061 {
1062 	int i;
1063 
1064 	for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
1065 		pci_update_resource(dev, i);
1066 }
1067 
1068 static inline bool platform_pci_power_manageable(struct pci_dev *dev)
1069 {
1070 	if (pci_use_mid_pm())
1071 		return true;
1072 
1073 	return acpi_pci_power_manageable(dev);
1074 }
1075 
1076 static inline int platform_pci_set_power_state(struct pci_dev *dev,
1077 					       pci_power_t t)
1078 {
1079 	if (pci_use_mid_pm())
1080 		return mid_pci_set_power_state(dev, t);
1081 
1082 	return acpi_pci_set_power_state(dev, t);
1083 }
1084 
1085 static inline pci_power_t platform_pci_get_power_state(struct pci_dev *dev)
1086 {
1087 	if (pci_use_mid_pm())
1088 		return mid_pci_get_power_state(dev);
1089 
1090 	return acpi_pci_get_power_state(dev);
1091 }
1092 
1093 static inline void platform_pci_refresh_power_state(struct pci_dev *dev)
1094 {
1095 	if (!pci_use_mid_pm())
1096 		acpi_pci_refresh_power_state(dev);
1097 }
1098 
1099 static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev)
1100 {
1101 	if (pci_use_mid_pm())
1102 		return PCI_POWER_ERROR;
1103 
1104 	return acpi_pci_choose_state(dev);
1105 }
1106 
1107 static inline int platform_pci_set_wakeup(struct pci_dev *dev, bool enable)
1108 {
1109 	if (pci_use_mid_pm())
1110 		return PCI_POWER_ERROR;
1111 
1112 	return acpi_pci_wakeup(dev, enable);
1113 }
1114 
1115 static inline bool platform_pci_need_resume(struct pci_dev *dev)
1116 {
1117 	if (pci_use_mid_pm())
1118 		return false;
1119 
1120 	return acpi_pci_need_resume(dev);
1121 }
1122 
1123 static inline bool platform_pci_bridge_d3(struct pci_dev *dev)
1124 {
1125 	if (pci_use_mid_pm())
1126 		return false;
1127 
1128 	return acpi_pci_bridge_d3(dev);
1129 }
1130 
1131 /**
1132  * pci_update_current_state - Read power state of given device and cache it
1133  * @dev: PCI device to handle.
1134  * @state: State to cache in case the device doesn't have the PM capability
1135  *
1136  * The power state is read from the PMCSR register, which however is
1137  * inaccessible in D3cold.  The platform firmware is therefore queried first
1138  * to detect accessibility of the register.  In case the platform firmware
1139  * reports an incorrect state or the device isn't power manageable by the
1140  * platform at all, we try to detect D3cold by testing accessibility of the
1141  * vendor ID in config space.
1142  */
1143 void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
1144 {
1145 	if (platform_pci_get_power_state(dev) == PCI_D3cold) {
1146 		dev->current_state = PCI_D3cold;
1147 	} else if (dev->pm_cap) {
1148 		u16 pmcsr;
1149 
1150 		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
1151 		if (PCI_POSSIBLE_ERROR(pmcsr)) {
1152 			dev->current_state = PCI_D3cold;
1153 			return;
1154 		}
1155 		dev->current_state = pmcsr & PCI_PM_CTRL_STATE_MASK;
1156 	} else {
1157 		dev->current_state = state;
1158 	}
1159 }
1160 
1161 /**
1162  * pci_refresh_power_state - Refresh the given device's power state data
1163  * @dev: Target PCI device.
1164  *
1165  * Ask the platform to refresh the devices power state information and invoke
1166  * pci_update_current_state() to update its current PCI power state.
1167  */
1168 void pci_refresh_power_state(struct pci_dev *dev)
1169 {
1170 	platform_pci_refresh_power_state(dev);
1171 	pci_update_current_state(dev, dev->current_state);
1172 }
1173 
1174 /**
1175  * pci_platform_power_transition - Use platform to change device power state
1176  * @dev: PCI device to handle.
1177  * @state: State to put the device into.
1178  */
1179 int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
1180 {
1181 	int error;
1182 
1183 	error = platform_pci_set_power_state(dev, state);
1184 	if (!error)
1185 		pci_update_current_state(dev, state);
1186 	else if (!dev->pm_cap) /* Fall back to PCI_D0 */
1187 		dev->current_state = PCI_D0;
1188 
1189 	return error;
1190 }
1191 EXPORT_SYMBOL_GPL(pci_platform_power_transition);
1192 
1193 static int pci_resume_one(struct pci_dev *pci_dev, void *ign)
1194 {
1195 	pm_request_resume(&pci_dev->dev);
1196 	return 0;
1197 }
1198 
1199 /**
1200  * pci_resume_bus - Walk given bus and runtime resume devices on it
1201  * @bus: Top bus of the subtree to walk.
1202  */
1203 void pci_resume_bus(struct pci_bus *bus)
1204 {
1205 	if (bus)
1206 		pci_walk_bus(bus, pci_resume_one, NULL);
1207 }
1208 
1209 static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout)
1210 {
1211 	int delay = 1;
1212 	bool retrain = false;
1213 	struct pci_dev *root, *bridge;
1214 
1215 	root = pcie_find_root_port(dev);
1216 
1217 	if (pci_is_pcie(dev)) {
1218 		bridge = pci_upstream_bridge(dev);
1219 		if (bridge)
1220 			retrain = true;
1221 	}
1222 
1223 	/*
1224 	 * The caller has already waited long enough after a reset that the
1225 	 * device should respond to config requests, but it may respond
1226 	 * with Request Retry Status (RRS) if it needs more time to
1227 	 * initialize.
1228 	 *
1229 	 * If the device is below a Root Port with Configuration RRS
1230 	 * Software Visibility enabled, reading the Vendor ID returns a
1231 	 * special data value if the device responded with RRS.  Read the
1232 	 * Vendor ID until we get non-RRS status.
1233 	 *
1234 	 * If there's no Root Port or Configuration RRS Software Visibility
1235 	 * is not enabled, the device may still respond with RRS, but
1236 	 * hardware may retry the config request.  If no retries receive
1237 	 * Successful Completion, hardware generally synthesizes ~0
1238 	 * (PCI_ERROR_RESPONSE) data to complete the read.  Reading Vendor
1239 	 * ID for VFs and non-existent devices also returns ~0, so read the
1240 	 * Command register until it returns something other than ~0.
1241 	 */
1242 	for (;;) {
1243 		u32 id;
1244 
1245 		if (pci_dev_is_disconnected(dev)) {
1246 			pci_dbg(dev, "disconnected; not waiting\n");
1247 			return -ENOTTY;
1248 		}
1249 
1250 		if (root && root->config_rrs_sv) {
1251 			pci_read_config_dword(dev, PCI_VENDOR_ID, &id);
1252 			if (!pci_bus_rrs_vendor_id(id))
1253 				break;
1254 		} else {
1255 			pci_read_config_dword(dev, PCI_COMMAND, &id);
1256 			if (!PCI_POSSIBLE_ERROR(id))
1257 				break;
1258 		}
1259 
1260 		if (delay > timeout) {
1261 			pci_warn(dev, "not ready %dms after %s; giving up\n",
1262 				 delay - 1, reset_type);
1263 			return -ENOTTY;
1264 		}
1265 
1266 		if (delay > PCI_RESET_WAIT) {
1267 			if (retrain) {
1268 				retrain = false;
1269 				if (pcie_failed_link_retrain(bridge) == 0) {
1270 					delay = 1;
1271 					continue;
1272 				}
1273 			}
1274 			pci_info(dev, "not ready %dms after %s; waiting\n",
1275 				 delay - 1, reset_type);
1276 		}
1277 
1278 		msleep(delay);
1279 		delay *= 2;
1280 	}
1281 
1282 	if (delay > PCI_RESET_WAIT)
1283 		pci_info(dev, "ready %dms after %s\n", delay - 1,
1284 			 reset_type);
1285 	else
1286 		pci_dbg(dev, "ready %dms after %s\n", delay - 1,
1287 			reset_type);
1288 
1289 	return 0;
1290 }
1291 
1292 /**
1293  * pci_power_up - Put the given device into D0
1294  * @dev: PCI device to power up
1295  *
1296  * On success, return 0 or 1, depending on whether or not it is necessary to
1297  * restore the device's BARs subsequently (1 is returned in that case).
1298  *
1299  * On failure, return a negative error code.  Always return failure if @dev
1300  * lacks a Power Management Capability, even if the platform was able to
1301  * put the device in D0 via non-PCI means.
1302  */
1303 int pci_power_up(struct pci_dev *dev)
1304 {
1305 	bool need_restore;
1306 	pci_power_t state;
1307 	u16 pmcsr;
1308 
1309 	platform_pci_set_power_state(dev, PCI_D0);
1310 
1311 	if (!dev->pm_cap) {
1312 		state = platform_pci_get_power_state(dev);
1313 		if (state == PCI_UNKNOWN)
1314 			dev->current_state = PCI_D0;
1315 		else
1316 			dev->current_state = state;
1317 
1318 		return -EIO;
1319 	}
1320 
1321 	if (pci_dev_is_disconnected(dev)) {
1322 		dev->current_state = PCI_D3cold;
1323 		return -EIO;
1324 	}
1325 
1326 	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
1327 	if (PCI_POSSIBLE_ERROR(pmcsr)) {
1328 		pci_err(dev, "Unable to change power state from %s to D0, device inaccessible\n",
1329 			pci_power_name(dev->current_state));
1330 		dev->current_state = PCI_D3cold;
1331 		return -EIO;
1332 	}
1333 
1334 	state = pmcsr & PCI_PM_CTRL_STATE_MASK;
1335 
1336 	need_restore = (state == PCI_D3hot || dev->current_state >= PCI_D3hot) &&
1337 			!(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET);
1338 
1339 	if (state == PCI_D0)
1340 		goto end;
1341 
1342 	/*
1343 	 * Force the entire word to 0. This doesn't affect PME_Status, disables
1344 	 * PME_En, and sets PowerState to 0.
1345 	 */
1346 	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, 0);
1347 
1348 	/* Mandatory transition delays; see PCI PM 1.2. */
1349 	if (state == PCI_D3hot)
1350 		pci_dev_d3_sleep(dev);
1351 	else if (state == PCI_D2)
1352 		udelay(PCI_PM_D2_DELAY);
1353 
1354 end:
1355 	dev->current_state = PCI_D0;
1356 	if (need_restore)
1357 		return 1;
1358 
1359 	return 0;
1360 }
1361 
1362 /**
1363  * pci_set_full_power_state - Put a PCI device into D0 and update its state
1364  * @dev: PCI device to power up
1365  * @locked: whether pci_bus_sem is held
1366  *
1367  * Call pci_power_up() to put @dev into D0, read from its PCI_PM_CTRL register
1368  * to confirm the state change, restore its BARs if they might be lost and
1369  * reconfigure ASPM in accordance with the new power state.
1370  *
1371  * If pci_restore_state() is going to be called right after a power state change
1372  * to D0, it is more efficient to use pci_power_up() directly instead of this
1373  * function.
1374  */
1375 static int pci_set_full_power_state(struct pci_dev *dev, bool locked)
1376 {
1377 	u16 pmcsr;
1378 	int ret;
1379 
1380 	ret = pci_power_up(dev);
1381 	if (ret < 0) {
1382 		if (dev->current_state == PCI_D0)
1383 			return 0;
1384 
1385 		return ret;
1386 	}
1387 
1388 	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
1389 	dev->current_state = pmcsr & PCI_PM_CTRL_STATE_MASK;
1390 	if (dev->current_state != PCI_D0) {
1391 		pci_info_ratelimited(dev, "Refused to change power state from %s to D0\n",
1392 				     pci_power_name(dev->current_state));
1393 	} else if (ret > 0) {
1394 		/*
1395 		 * According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
1396 		 * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
1397 		 * from D3hot to D0 _may_ perform an internal reset, thereby
1398 		 * going to "D0 Uninitialized" rather than "D0 Initialized".
1399 		 * For example, at least some versions of the 3c905B and the
1400 		 * 3c556B exhibit this behaviour.
1401 		 *
1402 		 * At least some laptop BIOSen (e.g. the Thinkpad T21) leave
1403 		 * devices in a D3hot state at boot.  Consequently, we need to
1404 		 * restore at least the BARs so that the device will be
1405 		 * accessible to its driver.
1406 		 */
1407 		pci_restore_bars(dev);
1408 	}
1409 
1410 	if (dev->bus->self)
1411 		pcie_aspm_pm_state_change(dev->bus->self, locked);
1412 
1413 	return 0;
1414 }
1415 
1416 /**
1417  * __pci_dev_set_current_state - Set current state of a PCI device
1418  * @dev: Device to handle
1419  * @data: pointer to state to be set
1420  */
1421 static int __pci_dev_set_current_state(struct pci_dev *dev, void *data)
1422 {
1423 	pci_power_t state = *(pci_power_t *)data;
1424 
1425 	dev->current_state = state;
1426 	return 0;
1427 }
1428 
1429 /**
1430  * pci_bus_set_current_state - Walk given bus and set current state of devices
1431  * @bus: Top bus of the subtree to walk.
1432  * @state: state to be set
1433  */
1434 void pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state)
1435 {
1436 	if (bus)
1437 		pci_walk_bus(bus, __pci_dev_set_current_state, &state);
1438 }
1439 
1440 static void __pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state, bool locked)
1441 {
1442 	if (!bus)
1443 		return;
1444 
1445 	if (locked)
1446 		pci_walk_bus_locked(bus, __pci_dev_set_current_state, &state);
1447 	else
1448 		pci_walk_bus(bus, __pci_dev_set_current_state, &state);
1449 }
1450 
1451 /**
1452  * pci_set_low_power_state - Put a PCI device into a low-power state.
1453  * @dev: PCI device to handle.
1454  * @state: PCI power state (D1, D2, D3hot) to put the device into.
1455  * @locked: whether pci_bus_sem is held
1456  *
1457  * Use the device's PCI_PM_CTRL register to put it into a low-power state.
1458  *
1459  * RETURN VALUE:
1460  * -EINVAL if the requested state is invalid.
1461  * -EIO if device does not support PCI PM or its PM capabilities register has a
1462  * wrong version, or device doesn't support the requested state.
1463  * 0 if device already is in the requested state.
1464  * 0 if device's power state has been successfully changed.
1465  */
1466 static int pci_set_low_power_state(struct pci_dev *dev, pci_power_t state, bool locked)
1467 {
1468 	u16 pmcsr;
1469 
1470 	if (!dev->pm_cap)
1471 		return -EIO;
1472 
1473 	/*
1474 	 * Validate transition: We can enter D0 from any state, but if
1475 	 * we're already in a low-power state, we can only go deeper.  E.g.,
1476 	 * we can go from D1 to D3, but we can't go directly from D3 to D1;
1477 	 * we'd have to go from D3 to D0, then to D1.
1478 	 */
1479 	if (dev->current_state <= PCI_D3cold && dev->current_state > state) {
1480 		pci_dbg(dev, "Invalid power transition (from %s to %s)\n",
1481 			pci_power_name(dev->current_state),
1482 			pci_power_name(state));
1483 		return -EINVAL;
1484 	}
1485 
1486 	/* Check if this device supports the desired state */
1487 	if ((state == PCI_D1 && !dev->d1_support)
1488 	   || (state == PCI_D2 && !dev->d2_support))
1489 		return -EIO;
1490 
1491 	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
1492 	if (PCI_POSSIBLE_ERROR(pmcsr)) {
1493 		pci_err(dev, "Unable to change power state from %s to %s, device inaccessible\n",
1494 			pci_power_name(dev->current_state),
1495 			pci_power_name(state));
1496 		dev->current_state = PCI_D3cold;
1497 		return -EIO;
1498 	}
1499 
1500 	pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
1501 	pmcsr |= state;
1502 
1503 	/* Enter specified state */
1504 	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
1505 
1506 	/* Mandatory power management transition delays; see PCI PM 1.2. */
1507 	if (state == PCI_D3hot)
1508 		pci_dev_d3_sleep(dev);
1509 	else if (state == PCI_D2)
1510 		udelay(PCI_PM_D2_DELAY);
1511 
1512 	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
1513 	dev->current_state = pmcsr & PCI_PM_CTRL_STATE_MASK;
1514 	if (dev->current_state != state)
1515 		pci_info_ratelimited(dev, "Refused to change power state from %s to %s\n",
1516 				     pci_power_name(dev->current_state),
1517 				     pci_power_name(state));
1518 
1519 	if (dev->bus->self)
1520 		pcie_aspm_pm_state_change(dev->bus->self, locked);
1521 
1522 	return 0;
1523 }
1524 
1525 static int __pci_set_power_state(struct pci_dev *dev, pci_power_t state, bool locked)
1526 {
1527 	int error;
1528 
1529 	/* Bound the state we're entering */
1530 	if (state > PCI_D3cold)
1531 		state = PCI_D3cold;
1532 	else if (state < PCI_D0)
1533 		state = PCI_D0;
1534 	else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))
1535 
1536 		/*
1537 		 * If the device or the parent bridge do not support PCI
1538 		 * PM, ignore the request if we're doing anything other
1539 		 * than putting it into D0 (which would only happen on
1540 		 * boot).
1541 		 */
1542 		return 0;
1543 
1544 	/* Check if we're already there */
1545 	if (dev->current_state == state)
1546 		return 0;
1547 
1548 	if (state == PCI_D0)
1549 		return pci_set_full_power_state(dev, locked);
1550 
1551 	/*
1552 	 * This device is quirked not to be put into D3, so don't put it in
1553 	 * D3
1554 	 */
1555 	if (state >= PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
1556 		return 0;
1557 
1558 	if (state == PCI_D3cold) {
1559 		/*
1560 		 * To put the device in D3cold, put it into D3hot in the native
1561 		 * way, then put it into D3cold using platform ops.
1562 		 */
1563 		error = pci_set_low_power_state(dev, PCI_D3hot, locked);
1564 
1565 		if (pci_platform_power_transition(dev, PCI_D3cold))
1566 			return error;
1567 
1568 		/* Powering off a bridge may power off the whole hierarchy */
1569 		if (dev->current_state == PCI_D3cold)
1570 			__pci_bus_set_current_state(dev->subordinate, PCI_D3cold, locked);
1571 	} else {
1572 		error = pci_set_low_power_state(dev, state, locked);
1573 
1574 		if (pci_platform_power_transition(dev, state))
1575 			return error;
1576 	}
1577 
1578 	return 0;
1579 }
1580 
1581 /**
1582  * pci_set_power_state - Set the power state of a PCI device
1583  * @dev: PCI device to handle.
1584  * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
1585  *
1586  * Transition a device to a new power state, using the platform firmware and/or
1587  * the device's PCI PM registers.
1588  *
1589  * RETURN VALUE:
1590  * -EINVAL if the requested state is invalid.
1591  * -EIO if device does not support PCI PM or its PM capabilities register has a
1592  * wrong version, or device doesn't support the requested state.
1593  * 0 if the transition is to D1 or D2 but D1 and D2 are not supported.
1594  * 0 if device already is in the requested state.
1595  * 0 if the transition is to D3 but D3 is not supported.
1596  * 0 if device's power state has been successfully changed.
1597  */
1598 int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
1599 {
1600 	return __pci_set_power_state(dev, state, false);
1601 }
1602 EXPORT_SYMBOL(pci_set_power_state);
1603 
1604 int pci_set_power_state_locked(struct pci_dev *dev, pci_power_t state)
1605 {
1606 	lockdep_assert_held(&pci_bus_sem);
1607 
1608 	return __pci_set_power_state(dev, state, true);
1609 }
1610 EXPORT_SYMBOL(pci_set_power_state_locked);
1611 
1612 #define PCI_EXP_SAVE_REGS	7
1613 
1614 static struct pci_cap_saved_state *_pci_find_saved_cap(struct pci_dev *pci_dev,
1615 						       u16 cap, bool extended)
1616 {
1617 	struct pci_cap_saved_state *tmp;
1618 
1619 	hlist_for_each_entry(tmp, &pci_dev->saved_cap_space, next) {
1620 		if (tmp->cap.cap_extended == extended && tmp->cap.cap_nr == cap)
1621 			return tmp;
1622 	}
1623 	return NULL;
1624 }
1625 
1626 struct pci_cap_saved_state *pci_find_saved_cap(struct pci_dev *dev, char cap)
1627 {
1628 	return _pci_find_saved_cap(dev, cap, false);
1629 }
1630 
1631 struct pci_cap_saved_state *pci_find_saved_ext_cap(struct pci_dev *dev, u16 cap)
1632 {
1633 	return _pci_find_saved_cap(dev, cap, true);
1634 }
1635 
1636 static int pci_save_pcie_state(struct pci_dev *dev)
1637 {
1638 	int i = 0;
1639 	struct pci_cap_saved_state *save_state;
1640 	u16 *cap;
1641 
1642 	if (!pci_is_pcie(dev))
1643 		return 0;
1644 
1645 	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
1646 	if (!save_state) {
1647 		pci_err(dev, "buffer not found in %s\n", __func__);
1648 		return -ENOMEM;
1649 	}
1650 
1651 	cap = (u16 *)&save_state->cap.data[0];
1652 	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &cap[i++]);
1653 	pcie_capability_read_word(dev, PCI_EXP_LNKCTL, &cap[i++]);
1654 	pcie_capability_read_word(dev, PCI_EXP_SLTCTL, &cap[i++]);
1655 	pcie_capability_read_word(dev, PCI_EXP_RTCTL,  &cap[i++]);
1656 	pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &cap[i++]);
1657 	pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &cap[i++]);
1658 	pcie_capability_read_word(dev, PCI_EXP_SLTCTL2, &cap[i++]);
1659 
1660 	pci_save_aspm_l1ss_state(dev);
1661 	pci_save_ltr_state(dev);
1662 
1663 	return 0;
1664 }
1665 
1666 static void pci_restore_pcie_state(struct pci_dev *dev)
1667 {
1668 	int i = 0;
1669 	struct pci_cap_saved_state *save_state;
1670 	u16 *cap;
1671 
1672 	/*
1673 	 * Restore max latencies (in the LTR capability) before enabling
1674 	 * LTR itself in PCI_EXP_DEVCTL2.
1675 	 */
1676 	pci_restore_ltr_state(dev);
1677 	pci_restore_aspm_l1ss_state(dev);
1678 
1679 	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
1680 	if (!save_state)
1681 		return;
1682 
1683 	/*
1684 	 * Downstream ports reset the LTR enable bit when link goes down.
1685 	 * Check and re-configure the bit here before restoring device.
1686 	 * PCIe r5.0, sec 7.5.3.16.
1687 	 */
1688 	pci_bridge_reconfigure_ltr(dev);
1689 
1690 	cap = (u16 *)&save_state->cap.data[0];
1691 	pcie_capability_write_word(dev, PCI_EXP_DEVCTL, cap[i++]);
1692 	pcie_capability_write_word(dev, PCI_EXP_LNKCTL, cap[i++]);
1693 	pcie_capability_write_word(dev, PCI_EXP_SLTCTL, cap[i++]);
1694 	pcie_capability_write_word(dev, PCI_EXP_RTCTL, cap[i++]);
1695 	pcie_capability_write_word(dev, PCI_EXP_DEVCTL2, cap[i++]);
1696 	pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, cap[i++]);
1697 	pcie_capability_write_word(dev, PCI_EXP_SLTCTL2, cap[i++]);
1698 }
1699 
1700 static int pci_save_pcix_state(struct pci_dev *dev)
1701 {
1702 	int pos;
1703 	struct pci_cap_saved_state *save_state;
1704 
1705 	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
1706 	if (!pos)
1707 		return 0;
1708 
1709 	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
1710 	if (!save_state) {
1711 		pci_err(dev, "buffer not found in %s\n", __func__);
1712 		return -ENOMEM;
1713 	}
1714 
1715 	pci_read_config_word(dev, pos + PCI_X_CMD,
1716 			     (u16 *)save_state->cap.data);
1717 
1718 	return 0;
1719 }
1720 
1721 static void pci_restore_pcix_state(struct pci_dev *dev)
1722 {
1723 	int i = 0, pos;
1724 	struct pci_cap_saved_state *save_state;
1725 	u16 *cap;
1726 
1727 	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
1728 	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
1729 	if (!save_state || !pos)
1730 		return;
1731 	cap = (u16 *)&save_state->cap.data[0];
1732 
1733 	pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
1734 }
1735 
1736 /**
1737  * pci_save_state - save the PCI configuration space of a device before
1738  *		    suspending
1739  * @dev: PCI device that we're dealing with
1740  */
1741 int pci_save_state(struct pci_dev *dev)
1742 {
1743 	int i;
1744 	/* XXX: 100% dword access ok here? */
1745 	for (i = 0; i < 16; i++) {
1746 		pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
1747 		pci_dbg(dev, "save config %#04x: %#010x\n",
1748 			i * 4, dev->saved_config_space[i]);
1749 	}
1750 	dev->state_saved = true;
1751 
1752 	i = pci_save_pcie_state(dev);
1753 	if (i != 0)
1754 		return i;
1755 
1756 	i = pci_save_pcix_state(dev);
1757 	if (i != 0)
1758 		return i;
1759 
1760 	pci_save_dpc_state(dev);
1761 	pci_save_aer_state(dev);
1762 	pci_save_ptm_state(dev);
1763 	pci_save_tph_state(dev);
1764 	return pci_save_vc_state(dev);
1765 }
1766 EXPORT_SYMBOL(pci_save_state);
1767 
1768 static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
1769 				     u32 saved_val, int retry, bool force)
1770 {
1771 	u32 val;
1772 
1773 	pci_read_config_dword(pdev, offset, &val);
1774 	if (!force && val == saved_val)
1775 		return;
1776 
1777 	for (;;) {
1778 		pci_dbg(pdev, "restore config %#04x: %#010x -> %#010x\n",
1779 			offset, val, saved_val);
1780 		pci_write_config_dword(pdev, offset, saved_val);
1781 		if (retry-- <= 0)
1782 			return;
1783 
1784 		pci_read_config_dword(pdev, offset, &val);
1785 		if (val == saved_val)
1786 			return;
1787 
1788 		mdelay(1);
1789 	}
1790 }
1791 
1792 static void pci_restore_config_space_range(struct pci_dev *pdev,
1793 					   int start, int end, int retry,
1794 					   bool force)
1795 {
1796 	int index;
1797 
1798 	for (index = end; index >= start; index--)
1799 		pci_restore_config_dword(pdev, 4 * index,
1800 					 pdev->saved_config_space[index],
1801 					 retry, force);
1802 }
1803 
1804 static void pci_restore_config_space(struct pci_dev *pdev)
1805 {
1806 	if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
1807 		pci_restore_config_space_range(pdev, 10, 15, 0, false);
1808 		/* Restore BARs before the command register. */
1809 		pci_restore_config_space_range(pdev, 4, 9, 10, false);
1810 		pci_restore_config_space_range(pdev, 0, 3, 0, false);
1811 	} else if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
1812 		pci_restore_config_space_range(pdev, 12, 15, 0, false);
1813 
1814 		/*
1815 		 * Force rewriting of prefetch registers to avoid S3 resume
1816 		 * issues on Intel PCI bridges that occur when these
1817 		 * registers are not explicitly written.
1818 		 */
1819 		pci_restore_config_space_range(pdev, 9, 11, 0, true);
1820 		pci_restore_config_space_range(pdev, 0, 8, 0, false);
1821 	} else {
1822 		pci_restore_config_space_range(pdev, 0, 15, 0, false);
1823 	}
1824 }
1825 
1826 /**
1827  * pci_restore_state - Restore the saved state of a PCI device
1828  * @dev: PCI device that we're dealing with
1829  */
1830 void pci_restore_state(struct pci_dev *dev)
1831 {
1832 	pci_restore_pcie_state(dev);
1833 	pci_restore_pasid_state(dev);
1834 	pci_restore_pri_state(dev);
1835 	pci_restore_ats_state(dev);
1836 	pci_restore_vc_state(dev);
1837 	pci_restore_rebar_state(dev);
1838 	pci_restore_dpc_state(dev);
1839 	pci_restore_ptm_state(dev);
1840 	pci_restore_tph_state(dev);
1841 
1842 	pci_aer_clear_status(dev);
1843 	pci_restore_aer_state(dev);
1844 
1845 	pci_restore_config_space(dev);
1846 
1847 	pci_restore_pcix_state(dev);
1848 	pci_restore_msi_state(dev);
1849 
1850 	/* Restore ACS and IOV configuration state */
1851 	pci_enable_acs(dev);
1852 	pci_restore_iov_state(dev);
1853 
1854 	dev->state_saved = false;
1855 }
1856 EXPORT_SYMBOL(pci_restore_state);
1857 
1858 struct pci_saved_state {
1859 	u32 config_space[16];
1860 	struct pci_cap_saved_data cap[];
1861 };
1862 
1863 /**
1864  * pci_store_saved_state - Allocate and return an opaque struct containing
1865  *			   the device saved state.
1866  * @dev: PCI device that we're dealing with
1867  *
1868  * Return NULL if no state or error.
1869  */
1870 struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev)
1871 {
1872 	struct pci_saved_state *state;
1873 	struct pci_cap_saved_state *tmp;
1874 	struct pci_cap_saved_data *cap;
1875 	size_t size;
1876 
1877 	if (!dev->state_saved)
1878 		return NULL;
1879 
1880 	size = sizeof(*state) + sizeof(struct pci_cap_saved_data);
1881 
1882 	hlist_for_each_entry(tmp, &dev->saved_cap_space, next)
1883 		size += sizeof(struct pci_cap_saved_data) + tmp->cap.size;
1884 
1885 	state = kzalloc(size, GFP_KERNEL);
1886 	if (!state)
1887 		return NULL;
1888 
1889 	memcpy(state->config_space, dev->saved_config_space,
1890 	       sizeof(state->config_space));
1891 
1892 	cap = state->cap;
1893 	hlist_for_each_entry(tmp, &dev->saved_cap_space, next) {
1894 		size_t len = sizeof(struct pci_cap_saved_data) + tmp->cap.size;
1895 		memcpy(cap, &tmp->cap, len);
1896 		cap = (struct pci_cap_saved_data *)((u8 *)cap + len);
1897 	}
1898 	/* Empty cap_save terminates list */
1899 
1900 	return state;
1901 }
1902 EXPORT_SYMBOL_GPL(pci_store_saved_state);
1903 
1904 /**
1905  * pci_load_saved_state - Reload the provided save state into struct pci_dev.
1906  * @dev: PCI device that we're dealing with
1907  * @state: Saved state returned from pci_store_saved_state()
1908  */
1909 int pci_load_saved_state(struct pci_dev *dev,
1910 			 struct pci_saved_state *state)
1911 {
1912 	struct pci_cap_saved_data *cap;
1913 
1914 	dev->state_saved = false;
1915 
1916 	if (!state)
1917 		return 0;
1918 
1919 	memcpy(dev->saved_config_space, state->config_space,
1920 	       sizeof(state->config_space));
1921 
1922 	cap = state->cap;
1923 	while (cap->size) {
1924 		struct pci_cap_saved_state *tmp;
1925 
1926 		tmp = _pci_find_saved_cap(dev, cap->cap_nr, cap->cap_extended);
1927 		if (!tmp || tmp->cap.size != cap->size)
1928 			return -EINVAL;
1929 
1930 		memcpy(tmp->cap.data, cap->data, tmp->cap.size);
1931 		cap = (struct pci_cap_saved_data *)((u8 *)cap +
1932 		       sizeof(struct pci_cap_saved_data) + cap->size);
1933 	}
1934 
1935 	dev->state_saved = true;
1936 	return 0;
1937 }
1938 EXPORT_SYMBOL_GPL(pci_load_saved_state);
1939 
1940 /**
1941  * pci_load_and_free_saved_state - Reload the save state pointed to by state,
1942  *				   and free the memory allocated for it.
1943  * @dev: PCI device that we're dealing with
1944  * @state: Pointer to saved state returned from pci_store_saved_state()
1945  */
1946 int pci_load_and_free_saved_state(struct pci_dev *dev,
1947 				  struct pci_saved_state **state)
1948 {
1949 	int ret = pci_load_saved_state(dev, *state);
1950 	kfree(*state);
1951 	*state = NULL;
1952 	return ret;
1953 }
1954 EXPORT_SYMBOL_GPL(pci_load_and_free_saved_state);
1955 
1956 int __weak pcibios_enable_device(struct pci_dev *dev, int bars)
1957 {
1958 	return pci_enable_resources(dev, bars);
1959 }
1960 
1961 static int pci_host_bridge_enable_device(struct pci_dev *dev)
1962 {
1963 	struct pci_host_bridge *host_bridge = pci_find_host_bridge(dev->bus);
1964 	int err;
1965 
1966 	if (host_bridge && host_bridge->enable_device) {
1967 		err = host_bridge->enable_device(host_bridge, dev);
1968 		if (err)
1969 			return err;
1970 	}
1971 
1972 	return 0;
1973 }
1974 
1975 static void pci_host_bridge_disable_device(struct pci_dev *dev)
1976 {
1977 	struct pci_host_bridge *host_bridge = pci_find_host_bridge(dev->bus);
1978 
1979 	if (host_bridge && host_bridge->disable_device)
1980 		host_bridge->disable_device(host_bridge, dev);
1981 }
1982 
1983 static int do_pci_enable_device(struct pci_dev *dev, int bars)
1984 {
1985 	int err;
1986 	struct pci_dev *bridge;
1987 	u16 cmd;
1988 	u8 pin;
1989 
1990 	err = pci_set_power_state(dev, PCI_D0);
1991 	if (err < 0 && err != -EIO)
1992 		return err;
1993 
1994 	bridge = pci_upstream_bridge(dev);
1995 	if (bridge)
1996 		pcie_aspm_powersave_config_link(bridge);
1997 
1998 	err = pci_host_bridge_enable_device(dev);
1999 	if (err)
2000 		return err;
2001 
2002 	err = pcibios_enable_device(dev, bars);
2003 	if (err < 0)
2004 		goto err_enable;
2005 	pci_fixup_device(pci_fixup_enable, dev);
2006 
2007 	if (dev->msi_enabled || dev->msix_enabled)
2008 		return 0;
2009 
2010 	pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
2011 	if (pin) {
2012 		pci_read_config_word(dev, PCI_COMMAND, &cmd);
2013 		if (cmd & PCI_COMMAND_INTX_DISABLE)
2014 			pci_write_config_word(dev, PCI_COMMAND,
2015 					      cmd & ~PCI_COMMAND_INTX_DISABLE);
2016 	}
2017 
2018 	return 0;
2019 
2020 err_enable:
2021 	pci_host_bridge_disable_device(dev);
2022 
2023 	return err;
2024 
2025 }
2026 
2027 /**
2028  * pci_reenable_device - Resume abandoned device
2029  * @dev: PCI device to be resumed
2030  *
2031  * NOTE: This function is a backend of pci_default_resume() and is not supposed
2032  * to be called by normal code, write proper resume handler and use it instead.
2033  */
2034 int pci_reenable_device(struct pci_dev *dev)
2035 {
2036 	if (pci_is_enabled(dev))
2037 		return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1);
2038 	return 0;
2039 }
2040 EXPORT_SYMBOL(pci_reenable_device);
2041 
2042 static void pci_enable_bridge(struct pci_dev *dev)
2043 {
2044 	struct pci_dev *bridge;
2045 	int retval;
2046 
2047 	bridge = pci_upstream_bridge(dev);
2048 	if (bridge)
2049 		pci_enable_bridge(bridge);
2050 
2051 	if (pci_is_enabled(dev)) {
2052 		if (!dev->is_busmaster)
2053 			pci_set_master(dev);
2054 		return;
2055 	}
2056 
2057 	retval = pci_enable_device(dev);
2058 	if (retval)
2059 		pci_err(dev, "Error enabling bridge (%d), continuing\n",
2060 			retval);
2061 	pci_set_master(dev);
2062 }
2063 
2064 static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags)
2065 {
2066 	struct pci_dev *bridge;
2067 	int err;
2068 	int i, bars = 0;
2069 
2070 	/*
2071 	 * Power state could be unknown at this point, either due to a fresh
2072 	 * boot or a device removal call.  So get the current power state
2073 	 * so that things like MSI message writing will behave as expected
2074 	 * (e.g. if the device really is in D0 at enable time).
2075 	 */
2076 	pci_update_current_state(dev, dev->current_state);
2077 
2078 	if (atomic_inc_return(&dev->enable_cnt) > 1)
2079 		return 0;		/* already enabled */
2080 
2081 	bridge = pci_upstream_bridge(dev);
2082 	if (bridge)
2083 		pci_enable_bridge(bridge);
2084 
2085 	/* only skip sriov related */
2086 	for (i = 0; i <= PCI_ROM_RESOURCE; i++)
2087 		if (dev->resource[i].flags & flags)
2088 			bars |= (1 << i);
2089 	for (i = PCI_BRIDGE_RESOURCES; i < DEVICE_COUNT_RESOURCE; i++)
2090 		if (dev->resource[i].flags & flags)
2091 			bars |= (1 << i);
2092 
2093 	err = do_pci_enable_device(dev, bars);
2094 	if (err < 0)
2095 		atomic_dec(&dev->enable_cnt);
2096 	return err;
2097 }
2098 
2099 /**
2100  * pci_enable_device_mem - Initialize a device for use with Memory space
2101  * @dev: PCI device to be initialized
2102  *
2103  * Initialize device before it's used by a driver. Ask low-level code
2104  * to enable Memory resources. Wake up the device if it was suspended.
2105  * Beware, this function can fail.
2106  */
2107 int pci_enable_device_mem(struct pci_dev *dev)
2108 {
2109 	return pci_enable_device_flags(dev, IORESOURCE_MEM);
2110 }
2111 EXPORT_SYMBOL(pci_enable_device_mem);
2112 
2113 /**
2114  * pci_enable_device - Initialize device before it's used by a driver.
2115  * @dev: PCI device to be initialized
2116  *
2117  * Initialize device before it's used by a driver. Ask low-level code
2118  * to enable I/O and memory. Wake up the device if it was suspended.
2119  * Beware, this function can fail.
2120  *
2121  * Note we don't actually enable the device many times if we call
2122  * this function repeatedly (we just increment the count).
2123  */
2124 int pci_enable_device(struct pci_dev *dev)
2125 {
2126 	return pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO);
2127 }
2128 EXPORT_SYMBOL(pci_enable_device);
2129 
2130 /*
2131  * pcibios_device_add - provide arch specific hooks when adding device dev
2132  * @dev: the PCI device being added
2133  *
2134  * Permits the platform to provide architecture specific functionality when
2135  * devices are added. This is the default implementation. Architecture
2136  * implementations can override this.
2137  */
2138 int __weak pcibios_device_add(struct pci_dev *dev)
2139 {
2140 	return 0;
2141 }
2142 
2143 /**
2144  * pcibios_release_device - provide arch specific hooks when releasing
2145  *			    device dev
2146  * @dev: the PCI device being released
2147  *
2148  * Permits the platform to provide architecture specific functionality when
2149  * devices are released. This is the default implementation. Architecture
2150  * implementations can override this.
2151  */
2152 void __weak pcibios_release_device(struct pci_dev *dev) {}
2153 
2154 /**
2155  * pcibios_disable_device - disable arch specific PCI resources for device dev
2156  * @dev: the PCI device to disable
2157  *
2158  * Disables architecture specific PCI resources for the device. This
2159  * is the default implementation. Architecture implementations can
2160  * override this.
2161  */
2162 void __weak pcibios_disable_device(struct pci_dev *dev) {}
2163 
2164 static void do_pci_disable_device(struct pci_dev *dev)
2165 {
2166 	u16 pci_command;
2167 
2168 	pci_read_config_word(dev, PCI_COMMAND, &pci_command);
2169 	if (pci_command & PCI_COMMAND_MASTER) {
2170 		pci_command &= ~PCI_COMMAND_MASTER;
2171 		pci_write_config_word(dev, PCI_COMMAND, pci_command);
2172 	}
2173 
2174 	pcibios_disable_device(dev);
2175 }
2176 
2177 /**
2178  * pci_disable_enabled_device - Disable device without updating enable_cnt
2179  * @dev: PCI device to disable
2180  *
2181  * NOTE: This function is a backend of PCI power management routines and is
2182  * not supposed to be called drivers.
2183  */
2184 void pci_disable_enabled_device(struct pci_dev *dev)
2185 {
2186 	if (pci_is_enabled(dev))
2187 		do_pci_disable_device(dev);
2188 }
2189 
2190 /**
2191  * pci_disable_device - Disable PCI device after use
2192  * @dev: PCI device to be disabled
2193  *
2194  * Signal to the system that the PCI device is not in use by the system
2195  * anymore.  This only involves disabling PCI bus-mastering, if active.
2196  *
2197  * Note we don't actually disable the device until all callers of
2198  * pci_enable_device() have called pci_disable_device().
2199  */
2200 void pci_disable_device(struct pci_dev *dev)
2201 {
2202 	dev_WARN_ONCE(&dev->dev, atomic_read(&dev->enable_cnt) <= 0,
2203 		      "disabling already-disabled device");
2204 
2205 	if (atomic_dec_return(&dev->enable_cnt) != 0)
2206 		return;
2207 
2208 	pci_host_bridge_disable_device(dev);
2209 
2210 	do_pci_disable_device(dev);
2211 
2212 	dev->is_busmaster = 0;
2213 }
2214 EXPORT_SYMBOL(pci_disable_device);
2215 
2216 /**
2217  * pcibios_set_pcie_reset_state - set reset state for device dev
2218  * @dev: the PCIe device reset
2219  * @state: Reset state to enter into
2220  *
2221  * Set the PCIe reset state for the device. This is the default
2222  * implementation. Architecture implementations can override this.
2223  */
2224 int __weak pcibios_set_pcie_reset_state(struct pci_dev *dev,
2225 					enum pcie_reset_state state)
2226 {
2227 	return -EINVAL;
2228 }
2229 
2230 /**
2231  * pci_set_pcie_reset_state - set reset state for device dev
2232  * @dev: the PCIe device reset
2233  * @state: Reset state to enter into
2234  *
2235  * Sets the PCI reset state for the device.
2236  */
2237 int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
2238 {
2239 	return pcibios_set_pcie_reset_state(dev, state);
2240 }
2241 EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);
2242 
2243 #ifdef CONFIG_PCIEAER
2244 void pcie_clear_device_status(struct pci_dev *dev)
2245 {
2246 	u16 sta;
2247 
2248 	pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &sta);
2249 	pcie_capability_write_word(dev, PCI_EXP_DEVSTA, sta);
2250 }
2251 #endif
2252 
2253 /**
2254  * pcie_clear_root_pme_status - Clear root port PME interrupt status.
2255  * @dev: PCIe root port or event collector.
2256  */
2257 void pcie_clear_root_pme_status(struct pci_dev *dev)
2258 {
2259 	pcie_capability_set_dword(dev, PCI_EXP_RTSTA, PCI_EXP_RTSTA_PME);
2260 }
2261 
2262 /**
2263  * pci_check_pme_status - Check if given device has generated PME.
2264  * @dev: Device to check.
2265  *
2266  * Check the PME status of the device and if set, clear it and clear PME enable
2267  * (if set).  Return 'true' if PME status and PME enable were both set or
2268  * 'false' otherwise.
2269  */
2270 bool pci_check_pme_status(struct pci_dev *dev)
2271 {
2272 	int pmcsr_pos;
2273 	u16 pmcsr;
2274 	bool ret = false;
2275 
2276 	if (!dev->pm_cap)
2277 		return false;
2278 
2279 	pmcsr_pos = dev->pm_cap + PCI_PM_CTRL;
2280 	pci_read_config_word(dev, pmcsr_pos, &pmcsr);
2281 	if (!(pmcsr & PCI_PM_CTRL_PME_STATUS))
2282 		return false;
2283 
2284 	/* Clear PME status. */
2285 	pmcsr |= PCI_PM_CTRL_PME_STATUS;
2286 	if (pmcsr & PCI_PM_CTRL_PME_ENABLE) {
2287 		/* Disable PME to avoid interrupt flood. */
2288 		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
2289 		ret = true;
2290 	}
2291 
2292 	pci_write_config_word(dev, pmcsr_pos, pmcsr);
2293 
2294 	return ret;
2295 }
2296 
2297 /**
2298  * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set.
2299  * @dev: Device to handle.
2300  * @pme_poll_reset: Whether or not to reset the device's pme_poll flag.
2301  *
2302  * Check if @dev has generated PME and queue a resume request for it in that
2303  * case.
2304  */
2305 static int pci_pme_wakeup(struct pci_dev *dev, void *pme_poll_reset)
2306 {
2307 	if (pme_poll_reset && dev->pme_poll)
2308 		dev->pme_poll = false;
2309 
2310 	if (pci_check_pme_status(dev)) {
2311 		pci_wakeup_event(dev);
2312 		pm_request_resume(&dev->dev);
2313 	}
2314 	return 0;
2315 }
2316 
2317 /**
2318  * pci_pme_wakeup_bus - Walk given bus and wake up devices on it, if necessary.
2319  * @bus: Top bus of the subtree to walk.
2320  */
2321 void pci_pme_wakeup_bus(struct pci_bus *bus)
2322 {
2323 	if (bus)
2324 		pci_walk_bus(bus, pci_pme_wakeup, (void *)true);
2325 }
2326 
2327 
2328 /**
2329  * pci_pme_capable - check the capability of PCI device to generate PME#
2330  * @dev: PCI device to handle.
2331  * @state: PCI state from which device will issue PME#.
2332  */
2333 bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
2334 {
2335 	if (!dev->pm_cap)
2336 		return false;
2337 
2338 	return !!(dev->pme_support & (1 << state));
2339 }
2340 EXPORT_SYMBOL(pci_pme_capable);
2341 
2342 static void pci_pme_list_scan(struct work_struct *work)
2343 {
2344 	struct pci_pme_device *pme_dev, *n;
2345 
2346 	mutex_lock(&pci_pme_list_mutex);
2347 	list_for_each_entry_safe(pme_dev, n, &pci_pme_list, list) {
2348 		struct pci_dev *pdev = pme_dev->dev;
2349 
2350 		if (pdev->pme_poll) {
2351 			struct pci_dev *bridge = pdev->bus->self;
2352 			struct device *dev = &pdev->dev;
2353 			struct device *bdev = bridge ? &bridge->dev : NULL;
2354 			int bref = 0;
2355 
2356 			/*
2357 			 * If we have a bridge, it should be in an active/D0
2358 			 * state or the configuration space of subordinate
2359 			 * devices may not be accessible or stable over the
2360 			 * course of the call.
2361 			 */
2362 			if (bdev) {
2363 				bref = pm_runtime_get_if_active(bdev);
2364 				if (!bref)
2365 					continue;
2366 
2367 				if (bridge->current_state != PCI_D0)
2368 					goto put_bridge;
2369 			}
2370 
2371 			/*
2372 			 * The device itself should be suspended but config
2373 			 * space must be accessible, therefore it cannot be in
2374 			 * D3cold.
2375 			 */
2376 			if (pm_runtime_suspended(dev) &&
2377 			    pdev->current_state != PCI_D3cold)
2378 				pci_pme_wakeup(pdev, NULL);
2379 
2380 put_bridge:
2381 			if (bref > 0)
2382 				pm_runtime_put(bdev);
2383 		} else {
2384 			list_del(&pme_dev->list);
2385 			kfree(pme_dev);
2386 		}
2387 	}
2388 	if (!list_empty(&pci_pme_list))
2389 		queue_delayed_work(system_freezable_wq, &pci_pme_work,
2390 				   msecs_to_jiffies(PME_TIMEOUT));
2391 	mutex_unlock(&pci_pme_list_mutex);
2392 }
2393 
2394 static void __pci_pme_active(struct pci_dev *dev, bool enable)
2395 {
2396 	u16 pmcsr;
2397 
2398 	if (!dev->pme_support)
2399 		return;
2400 
2401 	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
2402 	/* Clear PME_Status by writing 1 to it and enable PME# */
2403 	pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;
2404 	if (!enable)
2405 		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
2406 
2407 	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
2408 }
2409 
2410 /**
2411  * pci_pme_restore - Restore PME configuration after config space restore.
2412  * @dev: PCI device to update.
2413  */
2414 void pci_pme_restore(struct pci_dev *dev)
2415 {
2416 	u16 pmcsr;
2417 
2418 	if (!dev->pme_support)
2419 		return;
2420 
2421 	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
2422 	if (dev->wakeup_prepared) {
2423 		pmcsr |= PCI_PM_CTRL_PME_ENABLE;
2424 		pmcsr &= ~PCI_PM_CTRL_PME_STATUS;
2425 	} else {
2426 		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
2427 		pmcsr |= PCI_PM_CTRL_PME_STATUS;
2428 	}
2429 	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
2430 }
2431 
2432 /**
2433  * pci_pme_active - enable or disable PCI device's PME# function
2434  * @dev: PCI device to handle.
2435  * @enable: 'true' to enable PME# generation; 'false' to disable it.
2436  *
2437  * The caller must verify that the device is capable of generating PME# before
2438  * calling this function with @enable equal to 'true'.
2439  */
2440 void pci_pme_active(struct pci_dev *dev, bool enable)
2441 {
2442 	__pci_pme_active(dev, enable);
2443 
2444 	/*
2445 	 * PCI (as opposed to PCIe) PME requires that the device have
2446 	 * its PME# line hooked up correctly. Not all hardware vendors
2447 	 * do this, so the PME never gets delivered and the device
2448 	 * remains asleep. The easiest way around this is to
2449 	 * periodically walk the list of suspended devices and check
2450 	 * whether any have their PME flag set. The assumption is that
2451 	 * we'll wake up often enough anyway that this won't be a huge
2452 	 * hit, and the power savings from the devices will still be a
2453 	 * win.
2454 	 *
2455 	 * Although PCIe uses in-band PME message instead of PME# line
2456 	 * to report PME, PME does not work for some PCIe devices in
2457 	 * reality.  For example, there are devices that set their PME
2458 	 * status bits, but don't really bother to send a PME message;
2459 	 * there are PCI Express Root Ports that don't bother to
2460 	 * trigger interrupts when they receive PME messages from the
2461 	 * devices below.  So PME poll is used for PCIe devices too.
2462 	 */
2463 
2464 	if (dev->pme_poll) {
2465 		struct pci_pme_device *pme_dev;
2466 		if (enable) {
2467 			pme_dev = kmalloc(sizeof(struct pci_pme_device),
2468 					  GFP_KERNEL);
2469 			if (!pme_dev) {
2470 				pci_warn(dev, "can't enable PME#\n");
2471 				return;
2472 			}
2473 			pme_dev->dev = dev;
2474 			mutex_lock(&pci_pme_list_mutex);
2475 			list_add(&pme_dev->list, &pci_pme_list);
2476 			if (list_is_singular(&pci_pme_list))
2477 				queue_delayed_work(system_freezable_wq,
2478 						   &pci_pme_work,
2479 						   msecs_to_jiffies(PME_TIMEOUT));
2480 			mutex_unlock(&pci_pme_list_mutex);
2481 		} else {
2482 			mutex_lock(&pci_pme_list_mutex);
2483 			list_for_each_entry(pme_dev, &pci_pme_list, list) {
2484 				if (pme_dev->dev == dev) {
2485 					list_del(&pme_dev->list);
2486 					kfree(pme_dev);
2487 					break;
2488 				}
2489 			}
2490 			mutex_unlock(&pci_pme_list_mutex);
2491 		}
2492 	}
2493 
2494 	pci_dbg(dev, "PME# %s\n", enable ? "enabled" : "disabled");
2495 }
2496 EXPORT_SYMBOL(pci_pme_active);
2497 
2498 /**
2499  * __pci_enable_wake - enable PCI device as wakeup event source
2500  * @dev: PCI device affected
2501  * @state: PCI state from which device will issue wakeup events
2502  * @enable: True to enable event generation; false to disable
2503  *
2504  * This enables the device as a wakeup event source, or disables it.
2505  * When such events involves platform-specific hooks, those hooks are
2506  * called automatically by this routine.
2507  *
2508  * Devices with legacy power management (no standard PCI PM capabilities)
2509  * always require such platform hooks.
2510  *
2511  * RETURN VALUE:
2512  * 0 is returned on success
2513  * -EINVAL is returned if device is not supposed to wake up the system
2514  * Error code depending on the platform is returned if both the platform and
2515  * the native mechanism fail to enable the generation of wake-up events
2516  */
2517 static int __pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable)
2518 {
2519 	int ret = 0;
2520 
2521 	/*
2522 	 * Bridges that are not power-manageable directly only signal
2523 	 * wakeup on behalf of subordinate devices which is set up
2524 	 * elsewhere, so skip them. However, bridges that are
2525 	 * power-manageable may signal wakeup for themselves (for example,
2526 	 * on a hotplug event) and they need to be covered here.
2527 	 */
2528 	if (!pci_power_manageable(dev))
2529 		return 0;
2530 
2531 	/* Don't do the same thing twice in a row for one device. */
2532 	if (!!enable == !!dev->wakeup_prepared)
2533 		return 0;
2534 
2535 	/*
2536 	 * According to "PCI System Architecture" 4th ed. by Tom Shanley & Don
2537 	 * Anderson we should be doing PME# wake enable followed by ACPI wake
2538 	 * enable.  To disable wake-up we call the platform first, for symmetry.
2539 	 */
2540 
2541 	if (enable) {
2542 		int error;
2543 
2544 		/*
2545 		 * Enable PME signaling if the device can signal PME from
2546 		 * D3cold regardless of whether or not it can signal PME from
2547 		 * the current target state, because that will allow it to
2548 		 * signal PME when the hierarchy above it goes into D3cold and
2549 		 * the device itself ends up in D3cold as a result of that.
2550 		 */
2551 		if (pci_pme_capable(dev, state) || pci_pme_capable(dev, PCI_D3cold))
2552 			pci_pme_active(dev, true);
2553 		else
2554 			ret = 1;
2555 		error = platform_pci_set_wakeup(dev, true);
2556 		if (ret)
2557 			ret = error;
2558 		if (!ret)
2559 			dev->wakeup_prepared = true;
2560 	} else {
2561 		platform_pci_set_wakeup(dev, false);
2562 		pci_pme_active(dev, false);
2563 		dev->wakeup_prepared = false;
2564 	}
2565 
2566 	return ret;
2567 }
2568 
2569 /**
2570  * pci_enable_wake - change wakeup settings for a PCI device
2571  * @pci_dev: Target device
2572  * @state: PCI state from which device will issue wakeup events
2573  * @enable: Whether or not to enable event generation
2574  *
2575  * If @enable is set, check device_may_wakeup() for the device before calling
2576  * __pci_enable_wake() for it.
2577  */
2578 int pci_enable_wake(struct pci_dev *pci_dev, pci_power_t state, bool enable)
2579 {
2580 	if (enable && !device_may_wakeup(&pci_dev->dev))
2581 		return -EINVAL;
2582 
2583 	return __pci_enable_wake(pci_dev, state, enable);
2584 }
2585 EXPORT_SYMBOL(pci_enable_wake);
2586 
2587 /**
2588  * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold
2589  * @dev: PCI device to prepare
2590  * @enable: True to enable wake-up event generation; false to disable
2591  *
2592  * Many drivers want the device to wake up the system from D3_hot or D3_cold
2593  * and this function allows them to set that up cleanly - pci_enable_wake()
2594  * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI
2595  * ordering constraints.
2596  *
2597  * This function only returns error code if the device is not allowed to wake
2598  * up the system from sleep or it is not capable of generating PME# from both
2599  * D3_hot and D3_cold and the platform is unable to enable wake-up power for it.
2600  */
2601 int pci_wake_from_d3(struct pci_dev *dev, bool enable)
2602 {
2603 	return pci_pme_capable(dev, PCI_D3cold) ?
2604 			pci_enable_wake(dev, PCI_D3cold, enable) :
2605 			pci_enable_wake(dev, PCI_D3hot, enable);
2606 }
2607 EXPORT_SYMBOL(pci_wake_from_d3);
2608 
2609 /**
2610  * pci_target_state - find an appropriate low power state for a given PCI dev
2611  * @dev: PCI device
2612  * @wakeup: Whether or not wakeup functionality will be enabled for the device.
2613  *
2614  * Use underlying platform code to find a supported low power state for @dev.
2615  * If the platform can't manage @dev, return the deepest state from which it
2616  * can generate wake events, based on any available PME info.
2617  */
2618 static pci_power_t pci_target_state(struct pci_dev *dev, bool wakeup)
2619 {
2620 	if (platform_pci_power_manageable(dev)) {
2621 		/*
2622 		 * Call the platform to find the target state for the device.
2623 		 */
2624 		pci_power_t state = platform_pci_choose_state(dev);
2625 
2626 		switch (state) {
2627 		case PCI_POWER_ERROR:
2628 		case PCI_UNKNOWN:
2629 			return PCI_D3hot;
2630 
2631 		case PCI_D1:
2632 		case PCI_D2:
2633 			if (pci_no_d1d2(dev))
2634 				return PCI_D3hot;
2635 		}
2636 
2637 		return state;
2638 	}
2639 
2640 	/*
2641 	 * If the device is in D3cold even though it's not power-manageable by
2642 	 * the platform, it may have been powered down by non-standard means.
2643 	 * Best to let it slumber.
2644 	 */
2645 	if (dev->current_state == PCI_D3cold)
2646 		return PCI_D3cold;
2647 	else if (!dev->pm_cap)
2648 		return PCI_D0;
2649 
2650 	if (wakeup && dev->pme_support) {
2651 		pci_power_t state = PCI_D3hot;
2652 
2653 		/*
2654 		 * Find the deepest state from which the device can generate
2655 		 * PME#.
2656 		 */
2657 		while (state && !(dev->pme_support & (1 << state)))
2658 			state--;
2659 
2660 		if (state)
2661 			return state;
2662 		else if (dev->pme_support & 1)
2663 			return PCI_D0;
2664 	}
2665 
2666 	return PCI_D3hot;
2667 }
2668 
2669 /**
2670  * pci_prepare_to_sleep - prepare PCI device for system-wide transition
2671  *			  into a sleep state
2672  * @dev: Device to handle.
2673  *
2674  * Choose the power state appropriate for the device depending on whether
2675  * it can wake up the system and/or is power manageable by the platform
2676  * (PCI_D3hot is the default) and put the device into that state.
2677  */
2678 int pci_prepare_to_sleep(struct pci_dev *dev)
2679 {
2680 	bool wakeup = device_may_wakeup(&dev->dev);
2681 	pci_power_t target_state = pci_target_state(dev, wakeup);
2682 	int error;
2683 
2684 	if (target_state == PCI_POWER_ERROR)
2685 		return -EIO;
2686 
2687 	pci_enable_wake(dev, target_state, wakeup);
2688 
2689 	error = pci_set_power_state(dev, target_state);
2690 
2691 	if (error)
2692 		pci_enable_wake(dev, target_state, false);
2693 
2694 	return error;
2695 }
2696 EXPORT_SYMBOL(pci_prepare_to_sleep);
2697 
2698 /**
2699  * pci_back_from_sleep - turn PCI device on during system-wide transition
2700  *			 into working state
2701  * @dev: Device to handle.
2702  *
2703  * Disable device's system wake-up capability and put it into D0.
2704  */
2705 int pci_back_from_sleep(struct pci_dev *dev)
2706 {
2707 	int ret = pci_set_power_state(dev, PCI_D0);
2708 
2709 	if (ret)
2710 		return ret;
2711 
2712 	pci_enable_wake(dev, PCI_D0, false);
2713 	return 0;
2714 }
2715 EXPORT_SYMBOL(pci_back_from_sleep);
2716 
2717 /**
2718  * pci_finish_runtime_suspend - Carry out PCI-specific part of runtime suspend.
2719  * @dev: PCI device being suspended.
2720  *
2721  * Prepare @dev to generate wake-up events at run time and put it into a low
2722  * power state.
2723  */
2724 int pci_finish_runtime_suspend(struct pci_dev *dev)
2725 {
2726 	pci_power_t target_state;
2727 	int error;
2728 
2729 	target_state = pci_target_state(dev, device_can_wakeup(&dev->dev));
2730 	if (target_state == PCI_POWER_ERROR)
2731 		return -EIO;
2732 
2733 	__pci_enable_wake(dev, target_state, pci_dev_run_wake(dev));
2734 
2735 	error = pci_set_power_state(dev, target_state);
2736 
2737 	if (error)
2738 		pci_enable_wake(dev, target_state, false);
2739 
2740 	return error;
2741 }
2742 
2743 /**
2744  * pci_dev_run_wake - Check if device can generate run-time wake-up events.
2745  * @dev: Device to check.
2746  *
2747  * Return true if the device itself is capable of generating wake-up events
2748  * (through the platform or using the native PCIe PME) or if the device supports
2749  * PME and one of its upstream bridges can generate wake-up events.
2750  */
2751 bool pci_dev_run_wake(struct pci_dev *dev)
2752 {
2753 	struct pci_bus *bus = dev->bus;
2754 
2755 	if (!dev->pme_support)
2756 		return false;
2757 
2758 	/* PME-capable in principle, but not from the target power state */
2759 	if (!pci_pme_capable(dev, pci_target_state(dev, true)))
2760 		return false;
2761 
2762 	if (device_can_wakeup(&dev->dev))
2763 		return true;
2764 
2765 	while (bus->parent) {
2766 		struct pci_dev *bridge = bus->self;
2767 
2768 		if (device_can_wakeup(&bridge->dev))
2769 			return true;
2770 
2771 		bus = bus->parent;
2772 	}
2773 
2774 	/* We have reached the root bus. */
2775 	if (bus->bridge)
2776 		return device_can_wakeup(bus->bridge);
2777 
2778 	return false;
2779 }
2780 EXPORT_SYMBOL_GPL(pci_dev_run_wake);
2781 
2782 /**
2783  * pci_dev_need_resume - Check if it is necessary to resume the device.
2784  * @pci_dev: Device to check.
2785  *
2786  * Return 'true' if the device is not runtime-suspended or it has to be
2787  * reconfigured due to wakeup settings difference between system and runtime
2788  * suspend, or the current power state of it is not suitable for the upcoming
2789  * (system-wide) transition.
2790  */
2791 bool pci_dev_need_resume(struct pci_dev *pci_dev)
2792 {
2793 	struct device *dev = &pci_dev->dev;
2794 	pci_power_t target_state;
2795 
2796 	if (!pm_runtime_suspended(dev) || platform_pci_need_resume(pci_dev))
2797 		return true;
2798 
2799 	target_state = pci_target_state(pci_dev, device_may_wakeup(dev));
2800 
2801 	/*
2802 	 * If the earlier platform check has not triggered, D3cold is just power
2803 	 * removal on top of D3hot, so no need to resume the device in that
2804 	 * case.
2805 	 */
2806 	return target_state != pci_dev->current_state &&
2807 		target_state != PCI_D3cold &&
2808 		pci_dev->current_state != PCI_D3hot;
2809 }
2810 
2811 /**
2812  * pci_dev_adjust_pme - Adjust PME setting for a suspended device.
2813  * @pci_dev: Device to check.
2814  *
2815  * If the device is suspended and it is not configured for system wakeup,
2816  * disable PME for it to prevent it from waking up the system unnecessarily.
2817  *
2818  * Note that if the device's power state is D3cold and the platform check in
2819  * pci_dev_need_resume() has not triggered, the device's configuration need not
2820  * be changed.
2821  */
2822 void pci_dev_adjust_pme(struct pci_dev *pci_dev)
2823 {
2824 	struct device *dev = &pci_dev->dev;
2825 
2826 	spin_lock_irq(&dev->power.lock);
2827 
2828 	if (pm_runtime_suspended(dev) && !device_may_wakeup(dev) &&
2829 	    pci_dev->current_state < PCI_D3cold)
2830 		__pci_pme_active(pci_dev, false);
2831 
2832 	spin_unlock_irq(&dev->power.lock);
2833 }
2834 
2835 /**
2836  * pci_dev_complete_resume - Finalize resume from system sleep for a device.
2837  * @pci_dev: Device to handle.
2838  *
2839  * If the device is runtime suspended and wakeup-capable, enable PME for it as
2840  * it might have been disabled during the prepare phase of system suspend if
2841  * the device was not configured for system wakeup.
2842  */
2843 void pci_dev_complete_resume(struct pci_dev *pci_dev)
2844 {
2845 	struct device *dev = &pci_dev->dev;
2846 
2847 	if (!pci_dev_run_wake(pci_dev))
2848 		return;
2849 
2850 	spin_lock_irq(&dev->power.lock);
2851 
2852 	if (pm_runtime_suspended(dev) && pci_dev->current_state < PCI_D3cold)
2853 		__pci_pme_active(pci_dev, true);
2854 
2855 	spin_unlock_irq(&dev->power.lock);
2856 }
2857 
2858 /**
2859  * pci_choose_state - Choose the power state of a PCI device.
2860  * @dev: Target PCI device.
2861  * @state: Target state for the whole system.
2862  *
2863  * Returns PCI power state suitable for @dev and @state.
2864  */
2865 pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
2866 {
2867 	if (state.event == PM_EVENT_ON)
2868 		return PCI_D0;
2869 
2870 	return pci_target_state(dev, false);
2871 }
2872 EXPORT_SYMBOL(pci_choose_state);
2873 
2874 void pci_config_pm_runtime_get(struct pci_dev *pdev)
2875 {
2876 	struct device *dev = &pdev->dev;
2877 	struct device *parent = dev->parent;
2878 
2879 	if (parent)
2880 		pm_runtime_get_sync(parent);
2881 	pm_runtime_get_noresume(dev);
2882 	/*
2883 	 * pdev->current_state is set to PCI_D3cold during suspending,
2884 	 * so wait until suspending completes
2885 	 */
2886 	pm_runtime_barrier(dev);
2887 	/*
2888 	 * Only need to resume devices in D3cold, because config
2889 	 * registers are still accessible for devices suspended but
2890 	 * not in D3cold.
2891 	 */
2892 	if (pdev->current_state == PCI_D3cold)
2893 		pm_runtime_resume(dev);
2894 }
2895 
2896 void pci_config_pm_runtime_put(struct pci_dev *pdev)
2897 {
2898 	struct device *dev = &pdev->dev;
2899 	struct device *parent = dev->parent;
2900 
2901 	pm_runtime_put(dev);
2902 	if (parent)
2903 		pm_runtime_put_sync(parent);
2904 }
2905 
2906 static const struct dmi_system_id bridge_d3_blacklist[] = {
2907 #ifdef CONFIG_X86
2908 	{
2909 		/*
2910 		 * Gigabyte X299 root port is not marked as hotplug capable
2911 		 * which allows Linux to power manage it.  However, this
2912 		 * confuses the BIOS SMI handler so don't power manage root
2913 		 * ports on that system.
2914 		 */
2915 		.ident = "X299 DESIGNARE EX-CF",
2916 		.matches = {
2917 			DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
2918 			DMI_MATCH(DMI_BOARD_NAME, "X299 DESIGNARE EX-CF"),
2919 		},
2920 	},
2921 	{
2922 		/*
2923 		 * Downstream device is not accessible after putting a root port
2924 		 * into D3cold and back into D0 on Elo Continental Z2 board
2925 		 */
2926 		.ident = "Elo Continental Z2",
2927 		.matches = {
2928 			DMI_MATCH(DMI_BOARD_VENDOR, "Elo Touch Solutions"),
2929 			DMI_MATCH(DMI_BOARD_NAME, "Geminilake"),
2930 			DMI_MATCH(DMI_BOARD_VERSION, "Continental Z2"),
2931 		},
2932 	},
2933 	{
2934 		/*
2935 		 * Changing power state of root port dGPU is connected fails
2936 		 * https://gitlab.freedesktop.org/drm/amd/-/issues/3229
2937 		 */
2938 		.ident = "Hewlett-Packard HP Pavilion 17 Notebook PC/1972",
2939 		.matches = {
2940 			DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
2941 			DMI_MATCH(DMI_BOARD_NAME, "1972"),
2942 			DMI_MATCH(DMI_BOARD_VERSION, "95.33"),
2943 		},
2944 	},
2945 #endif
2946 	{ }
2947 };
2948 
2949 /**
2950  * pci_bridge_d3_possible - Is it possible to put the bridge into D3
2951  * @bridge: Bridge to check
2952  *
2953  * Currently we only allow D3 for some PCIe ports and for Thunderbolt.
2954  *
2955  * Return: Whether it is possible to move the bridge to D3.
2956  *
2957  * The return value is guaranteed to be constant across the entire lifetime
2958  * of the bridge, including its hot-removal.
2959  */
2960 bool pci_bridge_d3_possible(struct pci_dev *bridge)
2961 {
2962 	if (!pci_is_pcie(bridge))
2963 		return false;
2964 
2965 	switch (pci_pcie_type(bridge)) {
2966 	case PCI_EXP_TYPE_ROOT_PORT:
2967 	case PCI_EXP_TYPE_UPSTREAM:
2968 	case PCI_EXP_TYPE_DOWNSTREAM:
2969 		if (pci_bridge_d3_disable)
2970 			return false;
2971 
2972 		/*
2973 		 * Hotplug ports handled by platform firmware may not be put
2974 		 * into D3 by the OS, e.g. ACPI slots ...
2975 		 */
2976 		if (bridge->is_hotplug_bridge && !bridge->is_pciehp)
2977 			return false;
2978 
2979 		/* ... or PCIe hotplug ports not handled natively by the OS. */
2980 		if (bridge->is_pciehp && !pciehp_is_native(bridge))
2981 			return false;
2982 
2983 		if (pci_bridge_d3_force)
2984 			return true;
2985 
2986 		/* Even the oldest 2010 Thunderbolt controller supports D3. */
2987 		if (bridge->is_thunderbolt)
2988 			return true;
2989 
2990 		/* Platform might know better if the bridge supports D3 */
2991 		if (platform_pci_bridge_d3(bridge))
2992 			return true;
2993 
2994 		/*
2995 		 * Hotplug ports handled natively by the OS were not validated
2996 		 * by vendors for runtime D3 at least until 2018 because there
2997 		 * was no OS support.
2998 		 */
2999 		if (bridge->is_pciehp)
3000 			return false;
3001 
3002 		if (dmi_check_system(bridge_d3_blacklist))
3003 			return false;
3004 
3005 		/*
3006 		 * Out of caution, we only allow PCIe ports from 2015 or newer
3007 		 * into D3 on x86.
3008 		 */
3009 		if (!IS_ENABLED(CONFIG_X86) || dmi_get_bios_year() >= 2015)
3010 			return true;
3011 		break;
3012 	}
3013 
3014 	return false;
3015 }
3016 
3017 static int pci_dev_check_d3cold(struct pci_dev *dev, void *data)
3018 {
3019 	bool *d3cold_ok = data;
3020 
3021 	if (/* The device needs to be allowed to go D3cold ... */
3022 	    dev->no_d3cold || !dev->d3cold_allowed ||
3023 
3024 	    /* ... and if it is wakeup capable to do so from D3cold. */
3025 	    (device_may_wakeup(&dev->dev) &&
3026 	     !pci_pme_capable(dev, PCI_D3cold)) ||
3027 
3028 	    /* If it is a bridge it must be allowed to go to D3. */
3029 	    !pci_power_manageable(dev))
3030 
3031 		*d3cold_ok = false;
3032 
3033 	return !*d3cold_ok;
3034 }
3035 
3036 /*
3037  * pci_bridge_d3_update - Update bridge D3 capabilities
3038  * @dev: PCI device which is changed
3039  *
3040  * Update upstream bridge PM capabilities accordingly depending on if the
3041  * device PM configuration was changed or the device is being removed.  The
3042  * change is also propagated upstream.
3043  */
3044 void pci_bridge_d3_update(struct pci_dev *dev)
3045 {
3046 	bool remove = !device_is_registered(&dev->dev);
3047 	struct pci_dev *bridge;
3048 	bool d3cold_ok = true;
3049 
3050 	bridge = pci_upstream_bridge(dev);
3051 	if (!bridge || !pci_bridge_d3_possible(bridge))
3052 		return;
3053 
3054 	/*
3055 	 * If D3 is currently allowed for the bridge, removing one of its
3056 	 * children won't change that.
3057 	 */
3058 	if (remove && bridge->bridge_d3)
3059 		return;
3060 
3061 	/*
3062 	 * If D3 is currently allowed for the bridge and a child is added or
3063 	 * changed, disallowance of D3 can only be caused by that child, so
3064 	 * we only need to check that single device, not any of its siblings.
3065 	 *
3066 	 * If D3 is currently not allowed for the bridge, checking the device
3067 	 * first may allow us to skip checking its siblings.
3068 	 */
3069 	if (!remove)
3070 		pci_dev_check_d3cold(dev, &d3cold_ok);
3071 
3072 	/*
3073 	 * If D3 is currently not allowed for the bridge, this may be caused
3074 	 * either by the device being changed/removed or any of its siblings,
3075 	 * so we need to go through all children to find out if one of them
3076 	 * continues to block D3.
3077 	 */
3078 	if (d3cold_ok && !bridge->bridge_d3)
3079 		pci_walk_bus(bridge->subordinate, pci_dev_check_d3cold,
3080 			     &d3cold_ok);
3081 
3082 	if (bridge->bridge_d3 != d3cold_ok) {
3083 		bridge->bridge_d3 = d3cold_ok;
3084 		/* Propagate change to upstream bridges */
3085 		pci_bridge_d3_update(bridge);
3086 	}
3087 }
3088 
3089 /**
3090  * pci_d3cold_enable - Enable D3cold for device
3091  * @dev: PCI device to handle
3092  *
3093  * This function can be used in drivers to enable D3cold from the device
3094  * they handle.  It also updates upstream PCI bridge PM capabilities
3095  * accordingly.
3096  */
3097 void pci_d3cold_enable(struct pci_dev *dev)
3098 {
3099 	if (dev->no_d3cold) {
3100 		dev->no_d3cold = false;
3101 		pci_bridge_d3_update(dev);
3102 	}
3103 }
3104 EXPORT_SYMBOL_GPL(pci_d3cold_enable);
3105 
3106 /**
3107  * pci_d3cold_disable - Disable D3cold for device
3108  * @dev: PCI device to handle
3109  *
3110  * This function can be used in drivers to disable D3cold from the device
3111  * they handle.  It also updates upstream PCI bridge PM capabilities
3112  * accordingly.
3113  */
3114 void pci_d3cold_disable(struct pci_dev *dev)
3115 {
3116 	if (!dev->no_d3cold) {
3117 		dev->no_d3cold = true;
3118 		pci_bridge_d3_update(dev);
3119 	}
3120 }
3121 EXPORT_SYMBOL_GPL(pci_d3cold_disable);
3122 
3123 void pci_pm_power_up_and_verify_state(struct pci_dev *pci_dev)
3124 {
3125 	pci_power_up(pci_dev);
3126 	pci_update_current_state(pci_dev, PCI_D0);
3127 }
3128 
3129 /**
3130  * pci_pm_init - Initialize PM functions of given PCI device
3131  * @dev: PCI device to handle.
3132  */
3133 void pci_pm_init(struct pci_dev *dev)
3134 {
3135 	int pm;
3136 	u16 pmc;
3137 
3138 	device_enable_async_suspend(&dev->dev);
3139 	dev->wakeup_prepared = false;
3140 
3141 	dev->pm_cap = 0;
3142 	dev->pme_support = 0;
3143 
3144 	/* find PCI PM capability in list */
3145 	pm = pci_find_capability(dev, PCI_CAP_ID_PM);
3146 	if (!pm)
3147 		goto poweron;
3148 	/* Check device's ability to generate PME# */
3149 	pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);
3150 
3151 	if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
3152 		pci_err(dev, "unsupported PM cap regs version (%u)\n",
3153 			pmc & PCI_PM_CAP_VER_MASK);
3154 		goto poweron;
3155 	}
3156 
3157 	dev->pm_cap = pm;
3158 	dev->d3hot_delay = PCI_PM_D3HOT_WAIT;
3159 	dev->d3cold_delay = PCI_PM_D3COLD_WAIT;
3160 	dev->bridge_d3 = pci_bridge_d3_possible(dev);
3161 	dev->d3cold_allowed = true;
3162 
3163 	dev->d1_support = false;
3164 	dev->d2_support = false;
3165 	if (!pci_no_d1d2(dev)) {
3166 		if (pmc & PCI_PM_CAP_D1)
3167 			dev->d1_support = true;
3168 		if (pmc & PCI_PM_CAP_D2)
3169 			dev->d2_support = true;
3170 
3171 		if (dev->d1_support || dev->d2_support)
3172 			pci_info(dev, "supports%s%s\n",
3173 				   dev->d1_support ? " D1" : "",
3174 				   dev->d2_support ? " D2" : "");
3175 	}
3176 
3177 	pmc &= PCI_PM_CAP_PME_MASK;
3178 	if (pmc) {
3179 		pci_info(dev, "PME# supported from%s%s%s%s%s\n",
3180 			 (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
3181 			 (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
3182 			 (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
3183 			 (pmc & PCI_PM_CAP_PME_D3hot) ? " D3hot" : "",
3184 			 (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : "");
3185 		dev->pme_support = FIELD_GET(PCI_PM_CAP_PME_MASK, pmc);
3186 		dev->pme_poll = true;
3187 		/*
3188 		 * Make device's PM flags reflect the wake-up capability, but
3189 		 * let the user space enable it to wake up the system as needed.
3190 		 */
3191 		device_set_wakeup_capable(&dev->dev, true);
3192 		/* Disable the PME# generation functionality */
3193 		pci_pme_active(dev, false);
3194 	}
3195 
3196 poweron:
3197 	pci_pm_power_up_and_verify_state(dev);
3198 	pm_runtime_forbid(&dev->dev);
3199 	pm_runtime_set_active(&dev->dev);
3200 	pm_runtime_enable(&dev->dev);
3201 }
3202 
3203 static unsigned long pci_ea_flags(struct pci_dev *dev, u8 prop)
3204 {
3205 	unsigned long flags = IORESOURCE_PCI_FIXED | IORESOURCE_PCI_EA_BEI;
3206 
3207 	switch (prop) {
3208 	case PCI_EA_P_MEM:
3209 	case PCI_EA_P_VF_MEM:
3210 		flags |= IORESOURCE_MEM;
3211 		break;
3212 	case PCI_EA_P_MEM_PREFETCH:
3213 	case PCI_EA_P_VF_MEM_PREFETCH:
3214 		flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH;
3215 		break;
3216 	case PCI_EA_P_IO:
3217 		flags |= IORESOURCE_IO;
3218 		break;
3219 	default:
3220 		return 0;
3221 	}
3222 
3223 	return flags;
3224 }
3225 
3226 static struct resource *pci_ea_get_resource(struct pci_dev *dev, u8 bei,
3227 					    u8 prop)
3228 {
3229 	if (bei <= PCI_EA_BEI_BAR5 && prop <= PCI_EA_P_IO)
3230 		return &dev->resource[bei];
3231 #ifdef CONFIG_PCI_IOV
3232 	else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5 &&
3233 		 (prop == PCI_EA_P_VF_MEM || prop == PCI_EA_P_VF_MEM_PREFETCH))
3234 		return &dev->resource[PCI_IOV_RESOURCES +
3235 				      bei - PCI_EA_BEI_VF_BAR0];
3236 #endif
3237 	else if (bei == PCI_EA_BEI_ROM)
3238 		return &dev->resource[PCI_ROM_RESOURCE];
3239 	else
3240 		return NULL;
3241 }
3242 
3243 /* Read an Enhanced Allocation (EA) entry */
3244 static int pci_ea_read(struct pci_dev *dev, int offset)
3245 {
3246 	struct resource *res;
3247 	const char *res_name;
3248 	int ent_size, ent_offset = offset;
3249 	resource_size_t start, end;
3250 	unsigned long flags;
3251 	u32 dw0, bei, base, max_offset;
3252 	u8 prop;
3253 	bool support_64 = (sizeof(resource_size_t) >= 8);
3254 
3255 	pci_read_config_dword(dev, ent_offset, &dw0);
3256 	ent_offset += 4;
3257 
3258 	/* Entry size field indicates DWORDs after 1st */
3259 	ent_size = (FIELD_GET(PCI_EA_ES, dw0) + 1) << 2;
3260 
3261 	if (!(dw0 & PCI_EA_ENABLE)) /* Entry not enabled */
3262 		goto out;
3263 
3264 	bei = FIELD_GET(PCI_EA_BEI, dw0);
3265 	prop = FIELD_GET(PCI_EA_PP, dw0);
3266 
3267 	/*
3268 	 * If the Property is in the reserved range, try the Secondary
3269 	 * Property instead.
3270 	 */
3271 	if (prop > PCI_EA_P_BRIDGE_IO && prop < PCI_EA_P_MEM_RESERVED)
3272 		prop = FIELD_GET(PCI_EA_SP, dw0);
3273 	if (prop > PCI_EA_P_BRIDGE_IO)
3274 		goto out;
3275 
3276 	res = pci_ea_get_resource(dev, bei, prop);
3277 	res_name = pci_resource_name(dev, bei);
3278 	if (!res) {
3279 		pci_err(dev, "Unsupported EA entry BEI: %u\n", bei);
3280 		goto out;
3281 	}
3282 
3283 	flags = pci_ea_flags(dev, prop);
3284 	if (!flags) {
3285 		pci_err(dev, "Unsupported EA properties: %#x\n", prop);
3286 		goto out;
3287 	}
3288 
3289 	/* Read Base */
3290 	pci_read_config_dword(dev, ent_offset, &base);
3291 	start = (base & PCI_EA_FIELD_MASK);
3292 	ent_offset += 4;
3293 
3294 	/* Read MaxOffset */
3295 	pci_read_config_dword(dev, ent_offset, &max_offset);
3296 	ent_offset += 4;
3297 
3298 	/* Read Base MSBs (if 64-bit entry) */
3299 	if (base & PCI_EA_IS_64) {
3300 		u32 base_upper;
3301 
3302 		pci_read_config_dword(dev, ent_offset, &base_upper);
3303 		ent_offset += 4;
3304 
3305 		flags |= IORESOURCE_MEM_64;
3306 
3307 		/* entry starts above 32-bit boundary, can't use */
3308 		if (!support_64 && base_upper)
3309 			goto out;
3310 
3311 		if (support_64)
3312 			start |= ((u64)base_upper << 32);
3313 	}
3314 
3315 	end = start + (max_offset | 0x03);
3316 
3317 	/* Read MaxOffset MSBs (if 64-bit entry) */
3318 	if (max_offset & PCI_EA_IS_64) {
3319 		u32 max_offset_upper;
3320 
3321 		pci_read_config_dword(dev, ent_offset, &max_offset_upper);
3322 		ent_offset += 4;
3323 
3324 		flags |= IORESOURCE_MEM_64;
3325 
3326 		/* entry too big, can't use */
3327 		if (!support_64 && max_offset_upper)
3328 			goto out;
3329 
3330 		if (support_64)
3331 			end += ((u64)max_offset_upper << 32);
3332 	}
3333 
3334 	if (end < start) {
3335 		pci_err(dev, "EA Entry crosses address boundary\n");
3336 		goto out;
3337 	}
3338 
3339 	if (ent_size != ent_offset - offset) {
3340 		pci_err(dev, "EA Entry Size (%d) does not match length read (%d)\n",
3341 			ent_size, ent_offset - offset);
3342 		goto out;
3343 	}
3344 
3345 	res->name = pci_name(dev);
3346 	res->start = start;
3347 	res->end = end;
3348 	res->flags = flags;
3349 
3350 	if (bei <= PCI_EA_BEI_BAR5)
3351 		pci_info(dev, "%s %pR: from Enhanced Allocation, properties %#02x\n",
3352 			 res_name, res, prop);
3353 	else if (bei == PCI_EA_BEI_ROM)
3354 		pci_info(dev, "%s %pR: from Enhanced Allocation, properties %#02x\n",
3355 			 res_name, res, prop);
3356 	else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5)
3357 		pci_info(dev, "%s %pR: from Enhanced Allocation, properties %#02x\n",
3358 			 res_name, res, prop);
3359 	else
3360 		pci_info(dev, "BEI %d %pR: from Enhanced Allocation, properties %#02x\n",
3361 			   bei, res, prop);
3362 
3363 out:
3364 	return offset + ent_size;
3365 }
3366 
3367 /* Enhanced Allocation Initialization */
3368 void pci_ea_init(struct pci_dev *dev)
3369 {
3370 	int ea;
3371 	u8 num_ent;
3372 	int offset;
3373 	int i;
3374 
3375 	/* find PCI EA capability in list */
3376 	ea = pci_find_capability(dev, PCI_CAP_ID_EA);
3377 	if (!ea)
3378 		return;
3379 
3380 	/* determine the number of entries */
3381 	pci_bus_read_config_byte(dev->bus, dev->devfn, ea + PCI_EA_NUM_ENT,
3382 					&num_ent);
3383 	num_ent &= PCI_EA_NUM_ENT_MASK;
3384 
3385 	offset = ea + PCI_EA_FIRST_ENT;
3386 
3387 	/* Skip DWORD 2 for type 1 functions */
3388 	if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
3389 		offset += 4;
3390 
3391 	/* parse each EA entry */
3392 	for (i = 0; i < num_ent; ++i)
3393 		offset = pci_ea_read(dev, offset);
3394 }
3395 
3396 static void pci_add_saved_cap(struct pci_dev *pci_dev,
3397 	struct pci_cap_saved_state *new_cap)
3398 {
3399 	hlist_add_head(&new_cap->next, &pci_dev->saved_cap_space);
3400 }
3401 
3402 /**
3403  * _pci_add_cap_save_buffer - allocate buffer for saving given
3404  *			      capability registers
3405  * @dev: the PCI device
3406  * @cap: the capability to allocate the buffer for
3407  * @extended: Standard or Extended capability ID
3408  * @size: requested size of the buffer
3409  */
3410 static int _pci_add_cap_save_buffer(struct pci_dev *dev, u16 cap,
3411 				    bool extended, unsigned int size)
3412 {
3413 	int pos;
3414 	struct pci_cap_saved_state *save_state;
3415 
3416 	if (extended)
3417 		pos = pci_find_ext_capability(dev, cap);
3418 	else
3419 		pos = pci_find_capability(dev, cap);
3420 
3421 	if (!pos)
3422 		return 0;
3423 
3424 	save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL);
3425 	if (!save_state)
3426 		return -ENOMEM;
3427 
3428 	save_state->cap.cap_nr = cap;
3429 	save_state->cap.cap_extended = extended;
3430 	save_state->cap.size = size;
3431 	pci_add_saved_cap(dev, save_state);
3432 
3433 	return 0;
3434 }
3435 
3436 int pci_add_cap_save_buffer(struct pci_dev *dev, char cap, unsigned int size)
3437 {
3438 	return _pci_add_cap_save_buffer(dev, cap, false, size);
3439 }
3440 
3441 int pci_add_ext_cap_save_buffer(struct pci_dev *dev, u16 cap, unsigned int size)
3442 {
3443 	return _pci_add_cap_save_buffer(dev, cap, true, size);
3444 }
3445 
3446 /**
3447  * pci_allocate_cap_save_buffers - allocate buffers for saving capabilities
3448  * @dev: the PCI device
3449  */
3450 void pci_allocate_cap_save_buffers(struct pci_dev *dev)
3451 {
3452 	int error;
3453 
3454 	error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP,
3455 					PCI_EXP_SAVE_REGS * sizeof(u16));
3456 	if (error)
3457 		pci_err(dev, "unable to preallocate PCI Express save buffer\n");
3458 
3459 	error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16));
3460 	if (error)
3461 		pci_err(dev, "unable to preallocate PCI-X save buffer\n");
3462 
3463 	error = pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_LTR,
3464 					    2 * sizeof(u16));
3465 	if (error)
3466 		pci_err(dev, "unable to allocate suspend buffer for LTR\n");
3467 
3468 	pci_allocate_vc_save_buffers(dev);
3469 }
3470 
3471 void pci_free_cap_save_buffers(struct pci_dev *dev)
3472 {
3473 	struct pci_cap_saved_state *tmp;
3474 	struct hlist_node *n;
3475 
3476 	hlist_for_each_entry_safe(tmp, n, &dev->saved_cap_space, next)
3477 		kfree(tmp);
3478 }
3479 
3480 /**
3481  * pci_configure_ari - enable or disable ARI forwarding
3482  * @dev: the PCI device
3483  *
3484  * If @dev and its upstream bridge both support ARI, enable ARI in the
3485  * bridge.  Otherwise, disable ARI in the bridge.
3486  */
3487 void pci_configure_ari(struct pci_dev *dev)
3488 {
3489 	u32 cap;
3490 	struct pci_dev *bridge;
3491 
3492 	if (pcie_ari_disabled || !pci_is_pcie(dev) || dev->devfn)
3493 		return;
3494 
3495 	bridge = dev->bus->self;
3496 	if (!bridge)
3497 		return;
3498 
3499 	pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);
3500 	if (!(cap & PCI_EXP_DEVCAP2_ARI))
3501 		return;
3502 
3503 	if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI)) {
3504 		pcie_capability_set_word(bridge, PCI_EXP_DEVCTL2,
3505 					 PCI_EXP_DEVCTL2_ARI);
3506 		bridge->ari_enabled = 1;
3507 	} else {
3508 		pcie_capability_clear_word(bridge, PCI_EXP_DEVCTL2,
3509 					   PCI_EXP_DEVCTL2_ARI);
3510 		bridge->ari_enabled = 0;
3511 	}
3512 }
3513 
3514 static bool pci_acs_flags_enabled(struct pci_dev *pdev, u16 acs_flags)
3515 {
3516 	int pos;
3517 	u16 cap, ctrl;
3518 
3519 	pos = pdev->acs_cap;
3520 	if (!pos)
3521 		return false;
3522 
3523 	/*
3524 	 * Except for egress control, capabilities are either required
3525 	 * or only required if controllable.  Features missing from the
3526 	 * capability field can therefore be assumed as hard-wired enabled.
3527 	 */
3528 	pci_read_config_word(pdev, pos + PCI_ACS_CAP, &cap);
3529 	acs_flags &= (cap | PCI_ACS_EC);
3530 
3531 	pci_read_config_word(pdev, pos + PCI_ACS_CTRL, &ctrl);
3532 	return (ctrl & acs_flags) == acs_flags;
3533 }
3534 
3535 /**
3536  * pci_acs_enabled - test ACS against required flags for a given device
3537  * @pdev: device to test
3538  * @acs_flags: required PCI ACS flags
3539  *
3540  * Return true if the device supports the provided flags.  Automatically
3541  * filters out flags that are not implemented on multifunction devices.
3542  *
3543  * Note that this interface checks the effective ACS capabilities of the
3544  * device rather than the actual capabilities.  For instance, most single
3545  * function endpoints are not required to support ACS because they have no
3546  * opportunity for peer-to-peer access.  We therefore return 'true'
3547  * regardless of whether the device exposes an ACS capability.  This makes
3548  * it much easier for callers of this function to ignore the actual type
3549  * or topology of the device when testing ACS support.
3550  */
3551 bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags)
3552 {
3553 	int ret;
3554 
3555 	ret = pci_dev_specific_acs_enabled(pdev, acs_flags);
3556 	if (ret >= 0)
3557 		return ret > 0;
3558 
3559 	/*
3560 	 * Conventional PCI and PCI-X devices never support ACS, either
3561 	 * effectively or actually.  The shared bus topology implies that
3562 	 * any device on the bus can receive or snoop DMA.
3563 	 */
3564 	if (!pci_is_pcie(pdev))
3565 		return false;
3566 
3567 	switch (pci_pcie_type(pdev)) {
3568 	/*
3569 	 * PCI/X-to-PCIe bridges are not specifically mentioned by the spec,
3570 	 * but since their primary interface is PCI/X, we conservatively
3571 	 * handle them as we would a non-PCIe device.
3572 	 */
3573 	case PCI_EXP_TYPE_PCIE_BRIDGE:
3574 	/*
3575 	 * PCIe 3.0, 6.12.1 excludes ACS on these devices.  "ACS is never
3576 	 * applicable... must never implement an ACS Extended Capability...".
3577 	 * This seems arbitrary, but we take a conservative interpretation
3578 	 * of this statement.
3579 	 */
3580 	case PCI_EXP_TYPE_PCI_BRIDGE:
3581 	case PCI_EXP_TYPE_RC_EC:
3582 		return false;
3583 	/*
3584 	 * PCIe 3.0, 6.12.1.1 specifies that downstream and root ports should
3585 	 * implement ACS in order to indicate their peer-to-peer capabilities,
3586 	 * regardless of whether they are single- or multi-function devices.
3587 	 */
3588 	case PCI_EXP_TYPE_DOWNSTREAM:
3589 	case PCI_EXP_TYPE_ROOT_PORT:
3590 		return pci_acs_flags_enabled(pdev, acs_flags);
3591 	/*
3592 	 * PCIe 3.0, 6.12.1.2 specifies ACS capabilities that should be
3593 	 * implemented by the remaining PCIe types to indicate peer-to-peer
3594 	 * capabilities, but only when they are part of a multifunction
3595 	 * device.  The footnote for section 6.12 indicates the specific
3596 	 * PCIe types included here.
3597 	 */
3598 	case PCI_EXP_TYPE_ENDPOINT:
3599 	case PCI_EXP_TYPE_UPSTREAM:
3600 	case PCI_EXP_TYPE_LEG_END:
3601 	case PCI_EXP_TYPE_RC_END:
3602 		if (!pdev->multifunction)
3603 			break;
3604 
3605 		return pci_acs_flags_enabled(pdev, acs_flags);
3606 	}
3607 
3608 	/*
3609 	 * PCIe 3.0, 6.12.1.3 specifies no ACS capabilities are applicable
3610 	 * to single function devices with the exception of downstream ports.
3611 	 */
3612 	return true;
3613 }
3614 
3615 /**
3616  * pci_acs_path_enabled - test ACS flags from start to end in a hierarchy
3617  * @start: starting downstream device
3618  * @end: ending upstream device or NULL to search to the root bus
3619  * @acs_flags: required flags
3620  *
3621  * Walk up a device tree from start to end testing PCI ACS support.  If
3622  * any step along the way does not support the required flags, return false.
3623  */
3624 bool pci_acs_path_enabled(struct pci_dev *start,
3625 			  struct pci_dev *end, u16 acs_flags)
3626 {
3627 	struct pci_dev *pdev, *parent = start;
3628 
3629 	do {
3630 		pdev = parent;
3631 
3632 		if (!pci_acs_enabled(pdev, acs_flags))
3633 			return false;
3634 
3635 		if (pci_is_root_bus(pdev->bus))
3636 			return (end == NULL);
3637 
3638 		parent = pdev->bus->self;
3639 	} while (pdev != end);
3640 
3641 	return true;
3642 }
3643 
3644 /**
3645  * pci_acs_init - Initialize ACS if hardware supports it
3646  * @dev: the PCI device
3647  */
3648 void pci_acs_init(struct pci_dev *dev)
3649 {
3650 	dev->acs_cap = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
3651 
3652 	/*
3653 	 * Attempt to enable ACS regardless of capability because some Root
3654 	 * Ports (e.g. those quirked with *_intel_pch_acs_*) do not have
3655 	 * the standard ACS capability but still support ACS via those
3656 	 * quirks.
3657 	 */
3658 	pci_enable_acs(dev);
3659 }
3660 
3661 /**
3662  * pci_enable_atomic_ops_to_root - enable AtomicOp requests to root port
3663  * @dev: the PCI device
3664  * @cap_mask: mask of desired AtomicOp sizes, including one or more of:
3665  *	PCI_EXP_DEVCAP2_ATOMIC_COMP32
3666  *	PCI_EXP_DEVCAP2_ATOMIC_COMP64
3667  *	PCI_EXP_DEVCAP2_ATOMIC_COMP128
3668  *
3669  * Return 0 if all upstream bridges support AtomicOp routing, egress
3670  * blocking is disabled on all upstream ports, and the root port supports
3671  * the requested completion capabilities (32-bit, 64-bit and/or 128-bit
3672  * AtomicOp completion), or negative otherwise.
3673  */
3674 int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask)
3675 {
3676 	struct pci_bus *bus = dev->bus;
3677 	struct pci_dev *bridge;
3678 	u32 cap, ctl2;
3679 
3680 	/*
3681 	 * Per PCIe r5.0, sec 9.3.5.10, the AtomicOp Requester Enable bit
3682 	 * in Device Control 2 is reserved in VFs and the PF value applies
3683 	 * to all associated VFs.
3684 	 */
3685 	if (dev->is_virtfn)
3686 		return -EINVAL;
3687 
3688 	if (!pci_is_pcie(dev))
3689 		return -EINVAL;
3690 
3691 	/*
3692 	 * Per PCIe r4.0, sec 6.15, endpoints and root ports may be
3693 	 * AtomicOp requesters.  For now, we only support endpoints as
3694 	 * requesters and root ports as completers.  No endpoints as
3695 	 * completers, and no peer-to-peer.
3696 	 */
3697 
3698 	switch (pci_pcie_type(dev)) {
3699 	case PCI_EXP_TYPE_ENDPOINT:
3700 	case PCI_EXP_TYPE_LEG_END:
3701 	case PCI_EXP_TYPE_RC_END:
3702 		break;
3703 	default:
3704 		return -EINVAL;
3705 	}
3706 
3707 	while (bus->parent) {
3708 		bridge = bus->self;
3709 
3710 		pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);
3711 
3712 		switch (pci_pcie_type(bridge)) {
3713 		/* Ensure switch ports support AtomicOp routing */
3714 		case PCI_EXP_TYPE_UPSTREAM:
3715 		case PCI_EXP_TYPE_DOWNSTREAM:
3716 			if (!(cap & PCI_EXP_DEVCAP2_ATOMIC_ROUTE))
3717 				return -EINVAL;
3718 			break;
3719 
3720 		/* Ensure root port supports all the sizes we care about */
3721 		case PCI_EXP_TYPE_ROOT_PORT:
3722 			if ((cap & cap_mask) != cap_mask)
3723 				return -EINVAL;
3724 			break;
3725 		}
3726 
3727 		/* Ensure upstream ports don't block AtomicOps on egress */
3728 		if (pci_pcie_type(bridge) == PCI_EXP_TYPE_UPSTREAM) {
3729 			pcie_capability_read_dword(bridge, PCI_EXP_DEVCTL2,
3730 						   &ctl2);
3731 			if (ctl2 & PCI_EXP_DEVCTL2_ATOMIC_EGRESS_BLOCK)
3732 				return -EINVAL;
3733 		}
3734 
3735 		bus = bus->parent;
3736 	}
3737 
3738 	pcie_capability_set_word(dev, PCI_EXP_DEVCTL2,
3739 				 PCI_EXP_DEVCTL2_ATOMIC_REQ);
3740 	return 0;
3741 }
3742 EXPORT_SYMBOL(pci_enable_atomic_ops_to_root);
3743 
3744 /**
3745  * pci_release_region - Release a PCI bar
3746  * @pdev: PCI device whose resources were previously reserved by
3747  *	  pci_request_region()
3748  * @bar: BAR to release
3749  *
3750  * Releases the PCI I/O and memory resources previously reserved by a
3751  * successful call to pci_request_region().  Call this function only
3752  * after all use of the PCI regions has ceased.
3753  */
3754 void pci_release_region(struct pci_dev *pdev, int bar)
3755 {
3756 	if (!pci_bar_index_is_valid(bar))
3757 		return;
3758 
3759 	if (pci_resource_len(pdev, bar) == 0)
3760 		return;
3761 	if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
3762 		release_region(pci_resource_start(pdev, bar),
3763 				pci_resource_len(pdev, bar));
3764 	else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
3765 		release_mem_region(pci_resource_start(pdev, bar),
3766 				pci_resource_len(pdev, bar));
3767 }
3768 EXPORT_SYMBOL(pci_release_region);
3769 
3770 /**
3771  * __pci_request_region - Reserved PCI I/O and memory resource
3772  * @pdev: PCI device whose resources are to be reserved
3773  * @bar: BAR to be reserved
3774  * @name: name of the driver requesting the resource
3775  * @exclusive: whether the region access is exclusive or not
3776  *
3777  * Returns: 0 on success, negative error code on failure.
3778  *
3779  * Mark the PCI region associated with PCI device @pdev BAR @bar as being
3780  * reserved by owner @name. Do not access any address inside the PCI regions
3781  * unless this call returns successfully.
3782  *
3783  * If @exclusive is set, then the region is marked so that userspace
3784  * is explicitly not allowed to map the resource via /dev/mem or
3785  * sysfs MMIO access.
3786  *
3787  * Returns 0 on success, or %EBUSY on error.  A warning
3788  * message is also printed on failure.
3789  */
3790 static int __pci_request_region(struct pci_dev *pdev, int bar,
3791 				const char *name, int exclusive)
3792 {
3793 	if (!pci_bar_index_is_valid(bar))
3794 		return -EINVAL;
3795 
3796 	if (pci_resource_len(pdev, bar) == 0)
3797 		return 0;
3798 
3799 	if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
3800 		if (!request_region(pci_resource_start(pdev, bar),
3801 			    pci_resource_len(pdev, bar), name))
3802 			goto err_out;
3803 	} else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
3804 		if (!__request_mem_region(pci_resource_start(pdev, bar),
3805 					pci_resource_len(pdev, bar), name,
3806 					exclusive))
3807 			goto err_out;
3808 	}
3809 
3810 	return 0;
3811 
3812 err_out:
3813 	pci_warn(pdev, "BAR %d: can't reserve %pR\n", bar,
3814 		 &pdev->resource[bar]);
3815 	return -EBUSY;
3816 }
3817 
3818 /**
3819  * pci_request_region - Reserve PCI I/O and memory resource
3820  * @pdev: PCI device whose resources are to be reserved
3821  * @bar: BAR to be reserved
3822  * @name: name of the driver requesting the resource
3823  *
3824  * Returns: 0 on success, negative error code on failure.
3825  *
3826  * Mark the PCI region associated with PCI device @pdev BAR @bar as being
3827  * reserved by owner @name. Do not access any address inside the PCI regions
3828  * unless this call returns successfully.
3829  *
3830  * Returns 0 on success, or %EBUSY on error.  A warning
3831  * message is also printed on failure.
3832  */
3833 int pci_request_region(struct pci_dev *pdev, int bar, const char *name)
3834 {
3835 	return __pci_request_region(pdev, bar, name, 0);
3836 }
3837 EXPORT_SYMBOL(pci_request_region);
3838 
3839 /**
3840  * pci_release_selected_regions - Release selected PCI I/O and memory resources
3841  * @pdev: PCI device whose resources were previously reserved
3842  * @bars: Bitmask of BARs to be released
3843  *
3844  * Release selected PCI I/O and memory resources previously reserved.
3845  * Call this function only after all use of the PCI regions has ceased.
3846  */
3847 void pci_release_selected_regions(struct pci_dev *pdev, int bars)
3848 {
3849 	int i;
3850 
3851 	for (i = 0; i < PCI_STD_NUM_BARS; i++)
3852 		if (bars & (1 << i))
3853 			pci_release_region(pdev, i);
3854 }
3855 EXPORT_SYMBOL(pci_release_selected_regions);
3856 
3857 static int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
3858 					  const char *name, int excl)
3859 {
3860 	int i;
3861 
3862 	for (i = 0; i < PCI_STD_NUM_BARS; i++)
3863 		if (bars & (1 << i))
3864 			if (__pci_request_region(pdev, i, name, excl))
3865 				goto err_out;
3866 	return 0;
3867 
3868 err_out:
3869 	while (--i >= 0)
3870 		if (bars & (1 << i))
3871 			pci_release_region(pdev, i);
3872 
3873 	return -EBUSY;
3874 }
3875 
3876 
3877 /**
3878  * pci_request_selected_regions - Reserve selected PCI I/O and memory resources
3879  * @pdev: PCI device whose resources are to be reserved
3880  * @bars: Bitmask of BARs to be requested
3881  * @name: Name of the driver requesting the resources
3882  *
3883  * Returns: 0 on success, negative error code on failure.
3884  */
3885 int pci_request_selected_regions(struct pci_dev *pdev, int bars,
3886 				 const char *name)
3887 {
3888 	return __pci_request_selected_regions(pdev, bars, name, 0);
3889 }
3890 EXPORT_SYMBOL(pci_request_selected_regions);
3891 
3892 /**
3893  * pci_request_selected_regions_exclusive - Request regions exclusively
3894  * @pdev: PCI device to request regions from
3895  * @bars: bit mask of BARs to request
3896  * @name: name of the driver requesting the resources
3897  *
3898  * Returns: 0 on success, negative error code on failure.
3899  */
3900 int pci_request_selected_regions_exclusive(struct pci_dev *pdev, int bars,
3901 					   const char *name)
3902 {
3903 	return __pci_request_selected_regions(pdev, bars, name,
3904 			IORESOURCE_EXCLUSIVE);
3905 }
3906 EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
3907 
3908 /**
3909  * pci_release_regions - Release reserved PCI I/O and memory resources
3910  * @pdev: PCI device whose resources were previously reserved by
3911  *	  pci_request_regions()
3912  *
3913  * Releases all PCI I/O and memory resources previously reserved by a
3914  * successful call to pci_request_regions().  Call this function only
3915  * after all use of the PCI regions has ceased.
3916  */
3917 void pci_release_regions(struct pci_dev *pdev)
3918 {
3919 	pci_release_selected_regions(pdev, (1 << PCI_STD_NUM_BARS) - 1);
3920 }
3921 EXPORT_SYMBOL(pci_release_regions);
3922 
3923 /**
3924  * pci_request_regions - Reserve PCI I/O and memory resources
3925  * @pdev: PCI device whose resources are to be reserved
3926  * @name: name of the driver requesting the resources
3927  *
3928  * Mark all PCI regions associated with PCI device @pdev as being reserved by
3929  * owner @name. Do not access any address inside the PCI regions unless this
3930  * call returns successfully.
3931  *
3932  * Returns 0 on success, or %EBUSY on error.  A warning
3933  * message is also printed on failure.
3934  */
3935 int pci_request_regions(struct pci_dev *pdev, const char *name)
3936 {
3937 	return pci_request_selected_regions(pdev,
3938 			((1 << PCI_STD_NUM_BARS) - 1), name);
3939 }
3940 EXPORT_SYMBOL(pci_request_regions);
3941 
3942 /**
3943  * pci_request_regions_exclusive - Reserve PCI I/O and memory resources
3944  * @pdev: PCI device whose resources are to be reserved
3945  * @name: name of the driver requesting the resources
3946  *
3947  * Returns: 0 on success, negative error code on failure.
3948  *
3949  * Mark all PCI regions associated with PCI device @pdev as being reserved
3950  * by owner @name. Do not access any address inside the PCI regions
3951  * unless this call returns successfully.
3952  *
3953  * pci_request_regions_exclusive() will mark the region so that /dev/mem
3954  * and the sysfs MMIO access will not be allowed.
3955  *
3956  * Returns 0 on success, or %EBUSY on error.  A warning message is also
3957  * printed on failure.
3958  */
3959 int pci_request_regions_exclusive(struct pci_dev *pdev, const char *name)
3960 {
3961 	return pci_request_selected_regions_exclusive(pdev,
3962 				((1 << PCI_STD_NUM_BARS) - 1), name);
3963 }
3964 EXPORT_SYMBOL(pci_request_regions_exclusive);
3965 
3966 /*
3967  * Record the PCI IO range (expressed as CPU physical address + size).
3968  * Return a negative value if an error has occurred, zero otherwise
3969  */
3970 int pci_register_io_range(const struct fwnode_handle *fwnode, phys_addr_t addr,
3971 			resource_size_t	size)
3972 {
3973 	int ret = 0;
3974 #ifdef PCI_IOBASE
3975 	struct logic_pio_hwaddr *range;
3976 
3977 	if (!size || addr + size < addr)
3978 		return -EINVAL;
3979 
3980 	range = kzalloc(sizeof(*range), GFP_ATOMIC);
3981 	if (!range)
3982 		return -ENOMEM;
3983 
3984 	range->fwnode = fwnode;
3985 	range->size = size;
3986 	range->hw_start = addr;
3987 	range->flags = LOGIC_PIO_CPU_MMIO;
3988 
3989 	ret = logic_pio_register_range(range);
3990 	if (ret)
3991 		kfree(range);
3992 
3993 	/* Ignore duplicates due to deferred probing */
3994 	if (ret == -EEXIST)
3995 		ret = 0;
3996 #endif
3997 
3998 	return ret;
3999 }
4000 
4001 phys_addr_t pci_pio_to_address(unsigned long pio)
4002 {
4003 #ifdef PCI_IOBASE
4004 	if (pio < MMIO_UPPER_LIMIT)
4005 		return logic_pio_to_hwaddr(pio);
4006 #endif
4007 
4008 	return (phys_addr_t) OF_BAD_ADDR;
4009 }
4010 EXPORT_SYMBOL_GPL(pci_pio_to_address);
4011 
4012 unsigned long __weak pci_address_to_pio(phys_addr_t address)
4013 {
4014 #ifdef PCI_IOBASE
4015 	return logic_pio_trans_cpuaddr(address);
4016 #else
4017 	if (address > IO_SPACE_LIMIT)
4018 		return (unsigned long)-1;
4019 
4020 	return (unsigned long) address;
4021 #endif
4022 }
4023 
4024 /**
4025  * pci_remap_iospace - Remap the memory mapped I/O space
4026  * @res: Resource describing the I/O space
4027  * @phys_addr: physical address of range to be mapped
4028  *
4029  * Remap the memory mapped I/O space described by the @res and the CPU
4030  * physical address @phys_addr into virtual address space.  Only
4031  * architectures that have memory mapped IO functions defined (and the
4032  * PCI_IOBASE value defined) should call this function.
4033  */
4034 #ifndef pci_remap_iospace
4035 int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
4036 {
4037 #if defined(PCI_IOBASE)
4038 	unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
4039 
4040 	if (!(res->flags & IORESOURCE_IO))
4041 		return -EINVAL;
4042 
4043 	if (res->end > IO_SPACE_LIMIT)
4044 		return -EINVAL;
4045 
4046 	return vmap_page_range(vaddr, vaddr + resource_size(res), phys_addr,
4047 			       pgprot_device(PAGE_KERNEL));
4048 #else
4049 	/*
4050 	 * This architecture does not have memory mapped I/O space,
4051 	 * so this function should never be called
4052 	 */
4053 	WARN_ONCE(1, "This architecture does not support memory mapped I/O\n");
4054 	return -ENODEV;
4055 #endif
4056 }
4057 EXPORT_SYMBOL(pci_remap_iospace);
4058 #endif
4059 
4060 /**
4061  * pci_unmap_iospace - Unmap the memory mapped I/O space
4062  * @res: resource to be unmapped
4063  *
4064  * Unmap the CPU virtual address @res from virtual address space.  Only
4065  * architectures that have memory mapped IO functions defined (and the
4066  * PCI_IOBASE value defined) should call this function.
4067  */
4068 void pci_unmap_iospace(struct resource *res)
4069 {
4070 #if defined(PCI_IOBASE)
4071 	unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
4072 
4073 	vunmap_range(vaddr, vaddr + resource_size(res));
4074 #endif
4075 }
4076 EXPORT_SYMBOL(pci_unmap_iospace);
4077 
4078 static void __pci_set_master(struct pci_dev *dev, bool enable)
4079 {
4080 	u16 old_cmd, cmd;
4081 
4082 	pci_read_config_word(dev, PCI_COMMAND, &old_cmd);
4083 	if (enable)
4084 		cmd = old_cmd | PCI_COMMAND_MASTER;
4085 	else
4086 		cmd = old_cmd & ~PCI_COMMAND_MASTER;
4087 	if (cmd != old_cmd) {
4088 		pci_dbg(dev, "%s bus mastering\n",
4089 			enable ? "enabling" : "disabling");
4090 		pci_write_config_word(dev, PCI_COMMAND, cmd);
4091 	}
4092 	dev->is_busmaster = enable;
4093 }
4094 
4095 /**
4096  * pcibios_setup - process "pci=" kernel boot arguments
4097  * @str: string used to pass in "pci=" kernel boot arguments
4098  *
4099  * Process kernel boot arguments.  This is the default implementation.
4100  * Architecture specific implementations can override this as necessary.
4101  */
4102 char * __weak __init pcibios_setup(char *str)
4103 {
4104 	return str;
4105 }
4106 
4107 /**
4108  * pcibios_set_master - enable PCI bus-mastering for device dev
4109  * @dev: the PCI device to enable
4110  *
4111  * Enables PCI bus-mastering for the device.  This is the default
4112  * implementation.  Architecture specific implementations can override
4113  * this if necessary.
4114  */
4115 void __weak pcibios_set_master(struct pci_dev *dev)
4116 {
4117 	u8 lat;
4118 
4119 	/* The latency timer doesn't apply to PCIe (either Type 0 or Type 1) */
4120 	if (pci_is_pcie(dev))
4121 		return;
4122 
4123 	pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
4124 	if (lat < 16)
4125 		lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency;
4126 	else if (lat > pcibios_max_latency)
4127 		lat = pcibios_max_latency;
4128 	else
4129 		return;
4130 
4131 	pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
4132 }
4133 
4134 /**
4135  * pci_set_master - enables bus-mastering for device dev
4136  * @dev: the PCI device to enable
4137  *
4138  * Enables bus-mastering on the device and calls pcibios_set_master()
4139  * to do the needed arch specific settings.
4140  */
4141 void pci_set_master(struct pci_dev *dev)
4142 {
4143 	__pci_set_master(dev, true);
4144 	pcibios_set_master(dev);
4145 }
4146 EXPORT_SYMBOL(pci_set_master);
4147 
4148 /**
4149  * pci_clear_master - disables bus-mastering for device dev
4150  * @dev: the PCI device to disable
4151  */
4152 void pci_clear_master(struct pci_dev *dev)
4153 {
4154 	__pci_set_master(dev, false);
4155 }
4156 EXPORT_SYMBOL(pci_clear_master);
4157 
4158 /**
4159  * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed
4160  * @dev: the PCI device for which MWI is to be enabled
4161  *
4162  * Helper function for pci_set_mwi.
4163  * Originally copied from drivers/net/acenic.c.
4164  * Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>.
4165  *
4166  * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
4167  */
4168 int pci_set_cacheline_size(struct pci_dev *dev)
4169 {
4170 	u8 cacheline_size;
4171 
4172 	if (!pci_cache_line_size)
4173 		return -EINVAL;
4174 
4175 	/* Validate current setting: the PCI_CACHE_LINE_SIZE must be
4176 	   equal to or multiple of the right value. */
4177 	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
4178 	if (cacheline_size >= pci_cache_line_size &&
4179 	    (cacheline_size % pci_cache_line_size) == 0)
4180 		return 0;
4181 
4182 	/* Write the correct value. */
4183 	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
4184 	/* Read it back. */
4185 	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
4186 	if (cacheline_size == pci_cache_line_size)
4187 		return 0;
4188 
4189 	pci_dbg(dev, "cache line size of %d is not supported\n",
4190 		   pci_cache_line_size << 2);
4191 
4192 	return -EINVAL;
4193 }
4194 EXPORT_SYMBOL_GPL(pci_set_cacheline_size);
4195 
4196 /**
4197  * pci_set_mwi - enables memory-write-invalidate PCI transaction
4198  * @dev: the PCI device for which MWI is enabled
4199  *
4200  * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
4201  *
4202  * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
4203  */
4204 int pci_set_mwi(struct pci_dev *dev)
4205 {
4206 #ifdef PCI_DISABLE_MWI
4207 	return 0;
4208 #else
4209 	int rc;
4210 	u16 cmd;
4211 
4212 	rc = pci_set_cacheline_size(dev);
4213 	if (rc)
4214 		return rc;
4215 
4216 	pci_read_config_word(dev, PCI_COMMAND, &cmd);
4217 	if (!(cmd & PCI_COMMAND_INVALIDATE)) {
4218 		pci_dbg(dev, "enabling Mem-Wr-Inval\n");
4219 		cmd |= PCI_COMMAND_INVALIDATE;
4220 		pci_write_config_word(dev, PCI_COMMAND, cmd);
4221 	}
4222 	return 0;
4223 #endif
4224 }
4225 EXPORT_SYMBOL(pci_set_mwi);
4226 
4227 /**
4228  * pci_try_set_mwi - enables memory-write-invalidate PCI transaction
4229  * @dev: the PCI device for which MWI is enabled
4230  *
4231  * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
4232  * Callers are not required to check the return value.
4233  *
4234  * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
4235  */
4236 int pci_try_set_mwi(struct pci_dev *dev)
4237 {
4238 #ifdef PCI_DISABLE_MWI
4239 	return 0;
4240 #else
4241 	return pci_set_mwi(dev);
4242 #endif
4243 }
4244 EXPORT_SYMBOL(pci_try_set_mwi);
4245 
4246 /**
4247  * pci_clear_mwi - disables Memory-Write-Invalidate for device dev
4248  * @dev: the PCI device to disable
4249  *
4250  * Disables PCI Memory-Write-Invalidate transaction on the device
4251  */
4252 void pci_clear_mwi(struct pci_dev *dev)
4253 {
4254 #ifndef PCI_DISABLE_MWI
4255 	u16 cmd;
4256 
4257 	pci_read_config_word(dev, PCI_COMMAND, &cmd);
4258 	if (cmd & PCI_COMMAND_INVALIDATE) {
4259 		cmd &= ~PCI_COMMAND_INVALIDATE;
4260 		pci_write_config_word(dev, PCI_COMMAND, cmd);
4261 	}
4262 #endif
4263 }
4264 EXPORT_SYMBOL(pci_clear_mwi);
4265 
4266 /**
4267  * pci_disable_parity - disable parity checking for device
4268  * @dev: the PCI device to operate on
4269  *
4270  * Disable parity checking for device @dev
4271  */
4272 void pci_disable_parity(struct pci_dev *dev)
4273 {
4274 	u16 cmd;
4275 
4276 	pci_read_config_word(dev, PCI_COMMAND, &cmd);
4277 	if (cmd & PCI_COMMAND_PARITY) {
4278 		cmd &= ~PCI_COMMAND_PARITY;
4279 		pci_write_config_word(dev, PCI_COMMAND, cmd);
4280 	}
4281 }
4282 
4283 /**
4284  * pci_intx - enables/disables PCI INTx for device dev
4285  * @pdev: the PCI device to operate on
4286  * @enable: boolean: whether to enable or disable PCI INTx
4287  *
4288  * Enables/disables PCI INTx for device @pdev
4289  */
4290 void pci_intx(struct pci_dev *pdev, int enable)
4291 {
4292 	u16 pci_command, new;
4293 
4294 	pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
4295 
4296 	if (enable)
4297 		new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
4298 	else
4299 		new = pci_command | PCI_COMMAND_INTX_DISABLE;
4300 
4301 	if (new == pci_command)
4302 		return;
4303 
4304 	pci_write_config_word(pdev, PCI_COMMAND, new);
4305 }
4306 EXPORT_SYMBOL_GPL(pci_intx);
4307 
4308 /**
4309  * pci_wait_for_pending_transaction - wait for pending transaction
4310  * @dev: the PCI device to operate on
4311  *
4312  * Return 0 if transaction is pending 1 otherwise.
4313  */
4314 int pci_wait_for_pending_transaction(struct pci_dev *dev)
4315 {
4316 	if (!pci_is_pcie(dev))
4317 		return 1;
4318 
4319 	return pci_wait_for_pending(dev, pci_pcie_cap(dev) + PCI_EXP_DEVSTA,
4320 				    PCI_EXP_DEVSTA_TRPND);
4321 }
4322 EXPORT_SYMBOL(pci_wait_for_pending_transaction);
4323 
4324 /**
4325  * pcie_flr - initiate a PCIe function level reset
4326  * @dev: device to reset
4327  *
4328  * Initiate a function level reset unconditionally on @dev without
4329  * checking any flags and DEVCAP
4330  */
4331 int pcie_flr(struct pci_dev *dev)
4332 {
4333 	if (!pci_wait_for_pending_transaction(dev))
4334 		pci_err(dev, "timed out waiting for pending transaction; performing function level reset anyway\n");
4335 
4336 	pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
4337 
4338 	if (dev->imm_ready)
4339 		return 0;
4340 
4341 	/*
4342 	 * Per PCIe r4.0, sec 6.6.2, a device must complete an FLR within
4343 	 * 100ms, but may silently discard requests while the FLR is in
4344 	 * progress.  Wait 100ms before trying to access the device.
4345 	 */
4346 	msleep(100);
4347 
4348 	return pci_dev_wait(dev, "FLR", PCIE_RESET_READY_POLL_MS);
4349 }
4350 EXPORT_SYMBOL_GPL(pcie_flr);
4351 
4352 /**
4353  * pcie_reset_flr - initiate a PCIe function level reset
4354  * @dev: device to reset
4355  * @probe: if true, return 0 if device can be reset this way
4356  *
4357  * Initiate a function level reset on @dev.
4358  */
4359 int pcie_reset_flr(struct pci_dev *dev, bool probe)
4360 {
4361 	if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET)
4362 		return -ENOTTY;
4363 
4364 	if (!(dev->devcap & PCI_EXP_DEVCAP_FLR))
4365 		return -ENOTTY;
4366 
4367 	if (probe)
4368 		return 0;
4369 
4370 	return pcie_flr(dev);
4371 }
4372 EXPORT_SYMBOL_GPL(pcie_reset_flr);
4373 
4374 static int pci_af_flr(struct pci_dev *dev, bool probe)
4375 {
4376 	int pos;
4377 	u8 cap;
4378 
4379 	pos = pci_find_capability(dev, PCI_CAP_ID_AF);
4380 	if (!pos)
4381 		return -ENOTTY;
4382 
4383 	if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET)
4384 		return -ENOTTY;
4385 
4386 	pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap);
4387 	if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
4388 		return -ENOTTY;
4389 
4390 	if (probe)
4391 		return 0;
4392 
4393 	/*
4394 	 * Wait for Transaction Pending bit to clear.  A word-aligned test
4395 	 * is used, so we use the control offset rather than status and shift
4396 	 * the test bit to match.
4397 	 */
4398 	if (!pci_wait_for_pending(dev, pos + PCI_AF_CTRL,
4399 				 PCI_AF_STATUS_TP << 8))
4400 		pci_err(dev, "timed out waiting for pending transaction; performing AF function level reset anyway\n");
4401 
4402 	pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
4403 
4404 	if (dev->imm_ready)
4405 		return 0;
4406 
4407 	/*
4408 	 * Per Advanced Capabilities for Conventional PCI ECN, 13 April 2006,
4409 	 * updated 27 July 2006; a device must complete an FLR within
4410 	 * 100ms, but may silently discard requests while the FLR is in
4411 	 * progress.  Wait 100ms before trying to access the device.
4412 	 */
4413 	msleep(100);
4414 
4415 	return pci_dev_wait(dev, "AF_FLR", PCIE_RESET_READY_POLL_MS);
4416 }
4417 
4418 /**
4419  * pci_pm_reset - Put device into PCI_D3 and back into PCI_D0.
4420  * @dev: Device to reset.
4421  * @probe: if true, return 0 if the device can be reset this way.
4422  *
4423  * If @dev supports native PCI PM and its PCI_PM_CTRL_NO_SOFT_RESET flag is
4424  * unset, it will be reinitialized internally when going from PCI_D3hot to
4425  * PCI_D0.  If that's the case and the device is not in a low-power state
4426  * already, force it into PCI_D3hot and back to PCI_D0, causing it to be reset.
4427  *
4428  * NOTE: This causes the caller to sleep for twice the device power transition
4429  * cooldown period, which for the D0->D3hot and D3hot->D0 transitions is 10 ms
4430  * by default (i.e. unless the @dev's d3hot_delay field has a different value).
4431  * Moreover, only devices in D0 can be reset by this function.
4432  */
4433 static int pci_pm_reset(struct pci_dev *dev, bool probe)
4434 {
4435 	u16 csr;
4436 
4437 	if (!dev->pm_cap || dev->dev_flags & PCI_DEV_FLAGS_NO_PM_RESET)
4438 		return -ENOTTY;
4439 
4440 	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr);
4441 	if (csr & PCI_PM_CTRL_NO_SOFT_RESET)
4442 		return -ENOTTY;
4443 
4444 	if (probe)
4445 		return 0;
4446 
4447 	if (dev->current_state != PCI_D0)
4448 		return -EINVAL;
4449 
4450 	csr &= ~PCI_PM_CTRL_STATE_MASK;
4451 	csr |= PCI_D3hot;
4452 	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
4453 	pci_dev_d3_sleep(dev);
4454 
4455 	csr &= ~PCI_PM_CTRL_STATE_MASK;
4456 	csr |= PCI_D0;
4457 	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
4458 	pci_dev_d3_sleep(dev);
4459 
4460 	return pci_dev_wait(dev, "PM D3hot->D0", PCIE_RESET_READY_POLL_MS);
4461 }
4462 
4463 /**
4464  * pcie_wait_for_link_status - Wait for link status change
4465  * @pdev: Device whose link to wait for.
4466  * @use_lt: Use the LT bit if TRUE, or the DLLLA bit if FALSE.
4467  * @active: Waiting for active or inactive?
4468  *
4469  * Return 0 if successful, or -ETIMEDOUT if status has not changed within
4470  * PCIE_LINK_RETRAIN_TIMEOUT_MS milliseconds.
4471  */
4472 static int pcie_wait_for_link_status(struct pci_dev *pdev,
4473 				     bool use_lt, bool active)
4474 {
4475 	u16 lnksta_mask, lnksta_match;
4476 	unsigned long end_jiffies;
4477 	u16 lnksta;
4478 
4479 	lnksta_mask = use_lt ? PCI_EXP_LNKSTA_LT : PCI_EXP_LNKSTA_DLLLA;
4480 	lnksta_match = active ? lnksta_mask : 0;
4481 
4482 	end_jiffies = jiffies + msecs_to_jiffies(PCIE_LINK_RETRAIN_TIMEOUT_MS);
4483 	do {
4484 		pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnksta);
4485 		if ((lnksta & lnksta_mask) == lnksta_match)
4486 			return 0;
4487 		msleep(1);
4488 	} while (time_before(jiffies, end_jiffies));
4489 
4490 	return -ETIMEDOUT;
4491 }
4492 
4493 /**
4494  * pcie_retrain_link - Request a link retrain and wait for it to complete
4495  * @pdev: Device whose link to retrain.
4496  * @use_lt: Use the LT bit if TRUE, or the DLLLA bit if FALSE, for status.
4497  *
4498  * Trigger retraining of the PCIe Link and wait for the completion of the
4499  * retraining. As link retraining is known to asserts LBMS and may change
4500  * the Link Speed, LBMS is cleared after the retraining and the Link Speed
4501  * of the subordinate bus is updated.
4502  *
4503  * Retrain completion status is retrieved from the Link Status Register
4504  * according to @use_lt.  It is not verified whether the use of the DLLLA
4505  * bit is valid.
4506  *
4507  * Return 0 if successful, or -ETIMEDOUT if training has not completed
4508  * within PCIE_LINK_RETRAIN_TIMEOUT_MS milliseconds.
4509  */
4510 int pcie_retrain_link(struct pci_dev *pdev, bool use_lt)
4511 {
4512 	int rc;
4513 
4514 	/*
4515 	 * Ensure the updated LNKCTL parameters are used during link
4516 	 * training by checking that there is no ongoing link training that
4517 	 * may have started before link parameters were changed, so as to
4518 	 * avoid LTSSM race as recommended in Implementation Note at the end
4519 	 * of PCIe r6.1 sec 7.5.3.7.
4520 	 */
4521 	rc = pcie_wait_for_link_status(pdev, true, false);
4522 	if (rc)
4523 		return rc;
4524 
4525 	pcie_capability_set_word(pdev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_RL);
4526 	if (pdev->clear_retrain_link) {
4527 		/*
4528 		 * Due to an erratum in some devices the Retrain Link bit
4529 		 * needs to be cleared again manually to allow the link
4530 		 * training to succeed.
4531 		 */
4532 		pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_RL);
4533 	}
4534 
4535 	rc = pcie_wait_for_link_status(pdev, use_lt, !use_lt);
4536 
4537 	/*
4538 	 * Clear LBMS after a manual retrain so that the bit can be used
4539 	 * to track link speed or width changes made by hardware itself
4540 	 * in attempt to correct unreliable link operation.
4541 	 */
4542 	pcie_reset_lbms(pdev);
4543 
4544 	/*
4545 	 * Ensure the Link Speed updates after retraining in case the Link
4546 	 * Speed was changed because of the retraining. While the bwctrl's
4547 	 * IRQ handler normally picks up the new Link Speed, clearing LBMS
4548 	 * races with the IRQ handler reading the Link Status register and
4549 	 * can result in the handler returning early without updating the
4550 	 * Link Speed.
4551 	 */
4552 	if (pdev->subordinate)
4553 		pcie_update_link_speed(pdev->subordinate);
4554 
4555 	return rc;
4556 }
4557 
4558 /**
4559  * pcie_wait_for_link_delay - Wait until link is active or inactive
4560  * @pdev: Bridge device
4561  * @active: waiting for active or inactive?
4562  * @delay: Delay to wait after link has become active (in ms)
4563  *
4564  * Use this to wait till link becomes active or inactive.
4565  */
4566 static bool pcie_wait_for_link_delay(struct pci_dev *pdev, bool active,
4567 				     int delay)
4568 {
4569 	int rc;
4570 
4571 	/*
4572 	 * Some controllers might not implement link active reporting. In this
4573 	 * case, we wait for 1000 ms + any delay requested by the caller.
4574 	 */
4575 	if (!pdev->link_active_reporting) {
4576 		msleep(PCIE_LINK_RETRAIN_TIMEOUT_MS + delay);
4577 		return true;
4578 	}
4579 
4580 	/*
4581 	 * PCIe r4.0 sec 6.6.1, a component must enter LTSSM Detect within 20ms,
4582 	 * after which we should expect the link to be active if the reset was
4583 	 * successful. If so, software must wait a minimum 100ms before sending
4584 	 * configuration requests to devices downstream this port.
4585 	 *
4586 	 * If the link fails to activate, either the device was physically
4587 	 * removed or the link is permanently failed.
4588 	 */
4589 	if (active)
4590 		msleep(20);
4591 	rc = pcie_wait_for_link_status(pdev, false, active);
4592 	if (active) {
4593 		if (rc)
4594 			rc = pcie_failed_link_retrain(pdev);
4595 		if (rc)
4596 			return false;
4597 
4598 		msleep(delay);
4599 		return true;
4600 	}
4601 
4602 	if (rc)
4603 		return false;
4604 
4605 	return true;
4606 }
4607 
4608 /**
4609  * pcie_wait_for_link - Wait until link is active or inactive
4610  * @pdev: Bridge device
4611  * @active: waiting for active or inactive?
4612  *
4613  * Use this to wait till link becomes active or inactive.
4614  */
4615 bool pcie_wait_for_link(struct pci_dev *pdev, bool active)
4616 {
4617 	return pcie_wait_for_link_delay(pdev, active, 100);
4618 }
4619 
4620 /*
4621  * Find maximum D3cold delay required by all the devices on the bus.  The
4622  * spec says 100 ms, but firmware can lower it and we allow drivers to
4623  * increase it as well.
4624  *
4625  * Called with @pci_bus_sem locked for reading.
4626  */
4627 static int pci_bus_max_d3cold_delay(const struct pci_bus *bus)
4628 {
4629 	const struct pci_dev *pdev;
4630 	int min_delay = 100;
4631 	int max_delay = 0;
4632 
4633 	list_for_each_entry(pdev, &bus->devices, bus_list) {
4634 		if (pdev->d3cold_delay < min_delay)
4635 			min_delay = pdev->d3cold_delay;
4636 		if (pdev->d3cold_delay > max_delay)
4637 			max_delay = pdev->d3cold_delay;
4638 	}
4639 
4640 	return max(min_delay, max_delay);
4641 }
4642 
4643 /**
4644  * pci_bridge_wait_for_secondary_bus - Wait for secondary bus to be accessible
4645  * @dev: PCI bridge
4646  * @reset_type: reset type in human-readable form
4647  *
4648  * Handle necessary delays before access to the devices on the secondary
4649  * side of the bridge are permitted after D3cold to D0 transition
4650  * or Conventional Reset.
4651  *
4652  * For PCIe this means the delays in PCIe 5.0 section 6.6.1. For
4653  * conventional PCI it means Tpvrh + Trhfa specified in PCI 3.0 section
4654  * 4.3.2.
4655  *
4656  * Return 0 on success or -ENOTTY if the first device on the secondary bus
4657  * failed to become accessible.
4658  */
4659 int pci_bridge_wait_for_secondary_bus(struct pci_dev *dev, char *reset_type)
4660 {
4661 	struct pci_dev *child __free(pci_dev_put) = NULL;
4662 	int delay;
4663 
4664 	if (pci_dev_is_disconnected(dev))
4665 		return 0;
4666 
4667 	if (!pci_is_bridge(dev))
4668 		return 0;
4669 
4670 	down_read(&pci_bus_sem);
4671 
4672 	/*
4673 	 * We only deal with devices that are present currently on the bus.
4674 	 * For any hot-added devices the access delay is handled in pciehp
4675 	 * board_added(). In case of ACPI hotplug the firmware is expected
4676 	 * to configure the devices before OS is notified.
4677 	 */
4678 	if (!dev->subordinate || list_empty(&dev->subordinate->devices)) {
4679 		up_read(&pci_bus_sem);
4680 		return 0;
4681 	}
4682 
4683 	/* Take d3cold_delay requirements into account */
4684 	delay = pci_bus_max_d3cold_delay(dev->subordinate);
4685 	if (!delay) {
4686 		up_read(&pci_bus_sem);
4687 		return 0;
4688 	}
4689 
4690 	child = pci_dev_get(list_first_entry(&dev->subordinate->devices,
4691 					     struct pci_dev, bus_list));
4692 	up_read(&pci_bus_sem);
4693 
4694 	/*
4695 	 * Conventional PCI and PCI-X we need to wait Tpvrh + Trhfa before
4696 	 * accessing the device after reset (that is 1000 ms + 100 ms).
4697 	 */
4698 	if (!pci_is_pcie(dev)) {
4699 		pci_dbg(dev, "waiting %d ms for secondary bus\n", 1000 + delay);
4700 		msleep(1000 + delay);
4701 		return 0;
4702 	}
4703 
4704 	/*
4705 	 * For PCIe downstream and root ports that do not support speeds
4706 	 * greater than 5 GT/s need to wait minimum 100 ms. For higher
4707 	 * speeds (gen3) we need to wait first for the data link layer to
4708 	 * become active.
4709 	 *
4710 	 * However, 100 ms is the minimum and the PCIe spec says the
4711 	 * software must allow at least 1s before it can determine that the
4712 	 * device that did not respond is a broken device. Also device can
4713 	 * take longer than that to respond if it indicates so through Request
4714 	 * Retry Status completions.
4715 	 *
4716 	 * Therefore we wait for 100 ms and check for the device presence
4717 	 * until the timeout expires.
4718 	 */
4719 	if (!pcie_downstream_port(dev))
4720 		return 0;
4721 
4722 	if (pcie_get_speed_cap(dev) <= PCIE_SPEED_5_0GT) {
4723 		u16 status;
4724 
4725 		pci_dbg(dev, "waiting %d ms for downstream link\n", delay);
4726 		msleep(delay);
4727 
4728 		if (!pci_dev_wait(child, reset_type, PCI_RESET_WAIT - delay))
4729 			return 0;
4730 
4731 		/*
4732 		 * If the port supports active link reporting we now check
4733 		 * whether the link is active and if not bail out early with
4734 		 * the assumption that the device is not present anymore.
4735 		 */
4736 		if (!dev->link_active_reporting)
4737 			return -ENOTTY;
4738 
4739 		pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &status);
4740 		if (!(status & PCI_EXP_LNKSTA_DLLLA))
4741 			return -ENOTTY;
4742 
4743 		return pci_dev_wait(child, reset_type,
4744 				    PCIE_RESET_READY_POLL_MS - PCI_RESET_WAIT);
4745 	}
4746 
4747 	pci_dbg(dev, "waiting %d ms for downstream link, after activation\n",
4748 		delay);
4749 	if (!pcie_wait_for_link_delay(dev, true, delay)) {
4750 		/* Did not train, no need to wait any further */
4751 		pci_info(dev, "Data Link Layer Link Active not set in %d msec\n", delay);
4752 		return -ENOTTY;
4753 	}
4754 
4755 	return pci_dev_wait(child, reset_type,
4756 			    PCIE_RESET_READY_POLL_MS - delay);
4757 }
4758 
4759 void pci_reset_secondary_bus(struct pci_dev *dev)
4760 {
4761 	u16 ctrl;
4762 
4763 	pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &ctrl);
4764 	ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
4765 	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
4766 
4767 	/*
4768 	 * PCI spec v3.0 7.6.4.2 requires minimum Trst of 1ms.  Double
4769 	 * this to 2ms to ensure that we meet the minimum requirement.
4770 	 */
4771 	msleep(2);
4772 
4773 	ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
4774 	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
4775 }
4776 
4777 void __weak pcibios_reset_secondary_bus(struct pci_dev *dev)
4778 {
4779 	pci_reset_secondary_bus(dev);
4780 }
4781 
4782 /**
4783  * pci_bridge_secondary_bus_reset - Reset the secondary bus on a PCI bridge.
4784  * @dev: Bridge device
4785  *
4786  * Use the bridge control register to assert reset on the secondary bus.
4787  * Devices on the secondary bus are left in power-on state.
4788  */
4789 int pci_bridge_secondary_bus_reset(struct pci_dev *dev)
4790 {
4791 	if (!dev->block_cfg_access)
4792 		pci_warn_once(dev, "unlocked secondary bus reset via: %pS\n",
4793 			      __builtin_return_address(0));
4794 	pcibios_reset_secondary_bus(dev);
4795 
4796 	return pci_bridge_wait_for_secondary_bus(dev, "bus reset");
4797 }
4798 EXPORT_SYMBOL_GPL(pci_bridge_secondary_bus_reset);
4799 
4800 static int pci_parent_bus_reset(struct pci_dev *dev, bool probe)
4801 {
4802 	struct pci_dev *pdev;
4803 
4804 	if (pci_is_root_bus(dev->bus) || dev->subordinate ||
4805 	    !dev->bus->self || dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
4806 		return -ENOTTY;
4807 
4808 	list_for_each_entry(pdev, &dev->bus->devices, bus_list)
4809 		if (pdev != dev)
4810 			return -ENOTTY;
4811 
4812 	if (probe)
4813 		return 0;
4814 
4815 	return pci_bridge_secondary_bus_reset(dev->bus->self);
4816 }
4817 
4818 static int pci_reset_hotplug_slot(struct hotplug_slot *hotplug, bool probe)
4819 {
4820 	int rc = -ENOTTY;
4821 
4822 	if (!hotplug || !try_module_get(hotplug->owner))
4823 		return rc;
4824 
4825 	if (hotplug->ops->reset_slot)
4826 		rc = hotplug->ops->reset_slot(hotplug, probe);
4827 
4828 	module_put(hotplug->owner);
4829 
4830 	return rc;
4831 }
4832 
4833 static int pci_dev_reset_slot_function(struct pci_dev *dev, bool probe)
4834 {
4835 	if (dev->multifunction || dev->subordinate || !dev->slot ||
4836 	    dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
4837 		return -ENOTTY;
4838 
4839 	return pci_reset_hotplug_slot(dev->slot->hotplug, probe);
4840 }
4841 
4842 static u16 cxl_port_dvsec(struct pci_dev *dev)
4843 {
4844 	return pci_find_dvsec_capability(dev, PCI_VENDOR_ID_CXL,
4845 					 PCI_DVSEC_CXL_PORT);
4846 }
4847 
4848 static bool cxl_sbr_masked(struct pci_dev *dev)
4849 {
4850 	u16 dvsec, reg;
4851 	int rc;
4852 
4853 	dvsec = cxl_port_dvsec(dev);
4854 	if (!dvsec)
4855 		return false;
4856 
4857 	rc = pci_read_config_word(dev, dvsec + PCI_DVSEC_CXL_PORT_CTL, &reg);
4858 	if (rc || PCI_POSSIBLE_ERROR(reg))
4859 		return false;
4860 
4861 	/*
4862 	 * Per CXL spec r3.1, sec 8.1.5.2, when "Unmask SBR" is 0, the SBR
4863 	 * bit in Bridge Control has no effect.  When 1, the Port generates
4864 	 * hot reset when the SBR bit is set to 1.
4865 	 */
4866 	if (reg & PCI_DVSEC_CXL_PORT_CTL_UNMASK_SBR)
4867 		return false;
4868 
4869 	return true;
4870 }
4871 
4872 static int pci_reset_bus_function(struct pci_dev *dev, bool probe)
4873 {
4874 	struct pci_dev *bridge = pci_upstream_bridge(dev);
4875 	int rc;
4876 
4877 	/*
4878 	 * If "dev" is below a CXL port that has SBR control masked, SBR
4879 	 * won't do anything, so return error.
4880 	 */
4881 	if (bridge && cxl_sbr_masked(bridge)) {
4882 		if (probe)
4883 			return 0;
4884 
4885 		return -ENOTTY;
4886 	}
4887 
4888 	rc = pci_dev_reset_slot_function(dev, probe);
4889 	if (rc != -ENOTTY)
4890 		return rc;
4891 	return pci_parent_bus_reset(dev, probe);
4892 }
4893 
4894 static int cxl_reset_bus_function(struct pci_dev *dev, bool probe)
4895 {
4896 	struct pci_dev *bridge;
4897 	u16 dvsec, reg, val;
4898 	int rc;
4899 
4900 	bridge = pci_upstream_bridge(dev);
4901 	if (!bridge)
4902 		return -ENOTTY;
4903 
4904 	dvsec = cxl_port_dvsec(bridge);
4905 	if (!dvsec)
4906 		return -ENOTTY;
4907 
4908 	if (probe)
4909 		return 0;
4910 
4911 	rc = pci_read_config_word(bridge, dvsec + PCI_DVSEC_CXL_PORT_CTL, &reg);
4912 	if (rc)
4913 		return -ENOTTY;
4914 
4915 	if (reg & PCI_DVSEC_CXL_PORT_CTL_UNMASK_SBR) {
4916 		val = reg;
4917 	} else {
4918 		val = reg | PCI_DVSEC_CXL_PORT_CTL_UNMASK_SBR;
4919 		pci_write_config_word(bridge, dvsec + PCI_DVSEC_CXL_PORT_CTL,
4920 				      val);
4921 	}
4922 
4923 	rc = pci_reset_bus_function(dev, probe);
4924 
4925 	if (reg != val)
4926 		pci_write_config_word(bridge, dvsec + PCI_DVSEC_CXL_PORT_CTL,
4927 				      reg);
4928 
4929 	return rc;
4930 }
4931 
4932 void pci_dev_lock(struct pci_dev *dev)
4933 {
4934 	/* block PM suspend, driver probe, etc. */
4935 	device_lock(&dev->dev);
4936 	pci_cfg_access_lock(dev);
4937 }
4938 EXPORT_SYMBOL_GPL(pci_dev_lock);
4939 
4940 /* Return 1 on successful lock, 0 on contention */
4941 int pci_dev_trylock(struct pci_dev *dev)
4942 {
4943 	if (device_trylock(&dev->dev)) {
4944 		if (pci_cfg_access_trylock(dev))
4945 			return 1;
4946 		device_unlock(&dev->dev);
4947 	}
4948 
4949 	return 0;
4950 }
4951 EXPORT_SYMBOL_GPL(pci_dev_trylock);
4952 
4953 void pci_dev_unlock(struct pci_dev *dev)
4954 {
4955 	pci_cfg_access_unlock(dev);
4956 	device_unlock(&dev->dev);
4957 }
4958 EXPORT_SYMBOL_GPL(pci_dev_unlock);
4959 
4960 static void pci_dev_save_and_disable(struct pci_dev *dev)
4961 {
4962 	const struct pci_error_handlers *err_handler =
4963 			dev->driver ? dev->driver->err_handler : NULL;
4964 
4965 	/*
4966 	 * dev->driver->err_handler->reset_prepare() is protected against
4967 	 * races with ->remove() by the device lock, which must be held by
4968 	 * the caller.
4969 	 */
4970 	if (err_handler && err_handler->reset_prepare)
4971 		err_handler->reset_prepare(dev);
4972 	else if (dev->driver)
4973 		pci_warn(dev, "resetting");
4974 
4975 	/*
4976 	 * Wake-up device prior to save.  PM registers default to D0 after
4977 	 * reset and a simple register restore doesn't reliably return
4978 	 * to a non-D0 state anyway.
4979 	 */
4980 	pci_set_power_state(dev, PCI_D0);
4981 
4982 	pci_save_state(dev);
4983 	/*
4984 	 * Disable the device by clearing the Command register, except for
4985 	 * INTx-disable which is set.  This not only disables MMIO and I/O port
4986 	 * BARs, but also prevents the device from being Bus Master, preventing
4987 	 * DMA from the device including MSI/MSI-X interrupts.  For PCI 2.3
4988 	 * compliant devices, INTx-disable prevents legacy interrupts.
4989 	 */
4990 	pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
4991 }
4992 
4993 static void pci_dev_restore(struct pci_dev *dev)
4994 {
4995 	const struct pci_error_handlers *err_handler =
4996 			dev->driver ? dev->driver->err_handler : NULL;
4997 
4998 	pci_restore_state(dev);
4999 
5000 	/*
5001 	 * dev->driver->err_handler->reset_done() is protected against
5002 	 * races with ->remove() by the device lock, which must be held by
5003 	 * the caller.
5004 	 */
5005 	if (err_handler && err_handler->reset_done)
5006 		err_handler->reset_done(dev);
5007 	else if (dev->driver)
5008 		pci_warn(dev, "reset done");
5009 }
5010 
5011 /* dev->reset_methods[] is a 0-terminated list of indices into this array */
5012 const struct pci_reset_fn_method pci_reset_fn_methods[] = {
5013 	{ },
5014 	{ pci_dev_specific_reset, .name = "device_specific" },
5015 	{ pci_dev_acpi_reset, .name = "acpi" },
5016 	{ pcie_reset_flr, .name = "flr" },
5017 	{ pci_af_flr, .name = "af_flr" },
5018 	{ pci_pm_reset, .name = "pm" },
5019 	{ pci_reset_bus_function, .name = "bus" },
5020 	{ cxl_reset_bus_function, .name = "cxl_bus" },
5021 };
5022 
5023 /**
5024  * __pci_reset_function_locked - reset a PCI device function while holding
5025  * the @dev mutex lock.
5026  * @dev: PCI device to reset
5027  *
5028  * Some devices allow an individual function to be reset without affecting
5029  * other functions in the same device.  The PCI device must be responsive
5030  * to PCI config space in order to use this function.
5031  *
5032  * The device function is presumed to be unused and the caller is holding
5033  * the device mutex lock when this function is called.
5034  *
5035  * Resetting the device will make the contents of PCI configuration space
5036  * random, so any caller of this must be prepared to reinitialise the
5037  * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
5038  * etc.
5039  *
5040  * Returns 0 if the device function was successfully reset or negative if the
5041  * device doesn't support resetting a single function.
5042  */
5043 int __pci_reset_function_locked(struct pci_dev *dev)
5044 {
5045 	int i, m, rc;
5046 	const struct pci_reset_fn_method *method;
5047 
5048 	might_sleep();
5049 
5050 	/*
5051 	 * A reset method returns -ENOTTY if it doesn't support this device and
5052 	 * we should try the next method.
5053 	 *
5054 	 * If it returns 0 (success), we're finished.  If it returns any other
5055 	 * error, we're also finished: this indicates that further reset
5056 	 * mechanisms might be broken on the device.
5057 	 */
5058 	for (i = 0; i < PCI_NUM_RESET_METHODS; i++) {
5059 		m = dev->reset_methods[i];
5060 		if (!m)
5061 			return -ENOTTY;
5062 
5063 		method = &pci_reset_fn_methods[m];
5064 		pci_dbg(dev, "reset via %s\n", method->name);
5065 		rc = method->reset_fn(dev, PCI_RESET_DO_RESET);
5066 		if (!rc)
5067 			return 0;
5068 
5069 		pci_dbg(dev, "%s failed with %d\n", method->name, rc);
5070 		if (rc != -ENOTTY)
5071 			return rc;
5072 	}
5073 
5074 	return -ENOTTY;
5075 }
5076 EXPORT_SYMBOL_GPL(__pci_reset_function_locked);
5077 
5078 /**
5079  * pci_init_reset_methods - check whether device can be safely reset
5080  * and store supported reset mechanisms.
5081  * @dev: PCI device to check for reset mechanisms
5082  *
5083  * Some devices allow an individual function to be reset without affecting
5084  * other functions in the same device.  The PCI device must be in D0-D3hot
5085  * state.
5086  *
5087  * Stores reset mechanisms supported by device in reset_methods byte array
5088  * which is a member of struct pci_dev.
5089  */
5090 void pci_init_reset_methods(struct pci_dev *dev)
5091 {
5092 	int m, i, rc;
5093 
5094 	BUILD_BUG_ON(ARRAY_SIZE(pci_reset_fn_methods) != PCI_NUM_RESET_METHODS);
5095 
5096 	might_sleep();
5097 
5098 	i = 0;
5099 	for (m = 1; m < PCI_NUM_RESET_METHODS; m++) {
5100 		rc = pci_reset_fn_methods[m].reset_fn(dev, PCI_RESET_PROBE);
5101 		if (!rc)
5102 			dev->reset_methods[i++] = m;
5103 		else if (rc != -ENOTTY)
5104 			break;
5105 	}
5106 
5107 	dev->reset_methods[i] = 0;
5108 }
5109 
5110 /**
5111  * pci_reset_function - quiesce and reset a PCI device function
5112  * @dev: PCI device to reset
5113  *
5114  * Some devices allow an individual function to be reset without affecting
5115  * other functions in the same device.  The PCI device must be responsive
5116  * to PCI config space in order to use this function.
5117  *
5118  * This function does not just reset the PCI portion of a device, but
5119  * clears all the state associated with the device.  This function differs
5120  * from __pci_reset_function_locked() in that it saves and restores device state
5121  * over the reset and takes the PCI device lock.
5122  *
5123  * Returns 0 if the device function was successfully reset or negative if the
5124  * device doesn't support resetting a single function.
5125  */
5126 int pci_reset_function(struct pci_dev *dev)
5127 {
5128 	struct pci_dev *bridge;
5129 	int rc;
5130 
5131 	if (!pci_reset_supported(dev))
5132 		return -ENOTTY;
5133 
5134 	/*
5135 	 * If there's no upstream bridge, no locking is needed since there is
5136 	 * no upstream bridge configuration to hold consistent.
5137 	 */
5138 	bridge = pci_upstream_bridge(dev);
5139 	if (bridge)
5140 		pci_dev_lock(bridge);
5141 
5142 	pci_dev_lock(dev);
5143 	pci_dev_save_and_disable(dev);
5144 
5145 	rc = __pci_reset_function_locked(dev);
5146 
5147 	pci_dev_restore(dev);
5148 	pci_dev_unlock(dev);
5149 
5150 	if (bridge)
5151 		pci_dev_unlock(bridge);
5152 
5153 	return rc;
5154 }
5155 EXPORT_SYMBOL_GPL(pci_reset_function);
5156 
5157 /**
5158  * pci_reset_function_locked - quiesce and reset a PCI device function
5159  * @dev: PCI device to reset
5160  *
5161  * Some devices allow an individual function to be reset without affecting
5162  * other functions in the same device.  The PCI device must be responsive
5163  * to PCI config space in order to use this function.
5164  *
5165  * This function does not just reset the PCI portion of a device, but
5166  * clears all the state associated with the device.  This function differs
5167  * from __pci_reset_function_locked() in that it saves and restores device state
5168  * over the reset.  It also differs from pci_reset_function() in that it
5169  * requires the PCI device lock to be held.
5170  *
5171  * Returns 0 if the device function was successfully reset or negative if the
5172  * device doesn't support resetting a single function.
5173  */
5174 int pci_reset_function_locked(struct pci_dev *dev)
5175 {
5176 	int rc;
5177 
5178 	if (!pci_reset_supported(dev))
5179 		return -ENOTTY;
5180 
5181 	pci_dev_save_and_disable(dev);
5182 
5183 	rc = __pci_reset_function_locked(dev);
5184 
5185 	pci_dev_restore(dev);
5186 
5187 	return rc;
5188 }
5189 EXPORT_SYMBOL_GPL(pci_reset_function_locked);
5190 
5191 /**
5192  * pci_try_reset_function - quiesce and reset a PCI device function
5193  * @dev: PCI device to reset
5194  *
5195  * Same as above, except return -EAGAIN if unable to lock device.
5196  */
5197 int pci_try_reset_function(struct pci_dev *dev)
5198 {
5199 	int rc;
5200 
5201 	if (!pci_reset_supported(dev))
5202 		return -ENOTTY;
5203 
5204 	if (!pci_dev_trylock(dev))
5205 		return -EAGAIN;
5206 
5207 	pci_dev_save_and_disable(dev);
5208 	rc = __pci_reset_function_locked(dev);
5209 	pci_dev_restore(dev);
5210 	pci_dev_unlock(dev);
5211 
5212 	return rc;
5213 }
5214 EXPORT_SYMBOL_GPL(pci_try_reset_function);
5215 
5216 /* Do any devices on or below this bus prevent a bus reset? */
5217 static bool pci_bus_resettable(struct pci_bus *bus)
5218 {
5219 	struct pci_dev *dev;
5220 
5221 
5222 	if (bus->self && (bus->self->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET))
5223 		return false;
5224 
5225 	list_for_each_entry(dev, &bus->devices, bus_list) {
5226 		if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
5227 		    (dev->subordinate && !pci_bus_resettable(dev->subordinate)))
5228 			return false;
5229 	}
5230 
5231 	return true;
5232 }
5233 
5234 /* Lock devices from the top of the tree down */
5235 static void pci_bus_lock(struct pci_bus *bus)
5236 {
5237 	struct pci_dev *dev;
5238 
5239 	pci_dev_lock(bus->self);
5240 	list_for_each_entry(dev, &bus->devices, bus_list) {
5241 		if (dev->subordinate)
5242 			pci_bus_lock(dev->subordinate);
5243 		else
5244 			pci_dev_lock(dev);
5245 	}
5246 }
5247 
5248 /* Unlock devices from the bottom of the tree up */
5249 static void pci_bus_unlock(struct pci_bus *bus)
5250 {
5251 	struct pci_dev *dev;
5252 
5253 	list_for_each_entry(dev, &bus->devices, bus_list) {
5254 		if (dev->subordinate)
5255 			pci_bus_unlock(dev->subordinate);
5256 		else
5257 			pci_dev_unlock(dev);
5258 	}
5259 	pci_dev_unlock(bus->self);
5260 }
5261 
5262 /* Return 1 on successful lock, 0 on contention */
5263 static int pci_bus_trylock(struct pci_bus *bus)
5264 {
5265 	struct pci_dev *dev;
5266 
5267 	if (!pci_dev_trylock(bus->self))
5268 		return 0;
5269 
5270 	list_for_each_entry(dev, &bus->devices, bus_list) {
5271 		if (dev->subordinate) {
5272 			if (!pci_bus_trylock(dev->subordinate))
5273 				goto unlock;
5274 		} else if (!pci_dev_trylock(dev))
5275 			goto unlock;
5276 	}
5277 	return 1;
5278 
5279 unlock:
5280 	list_for_each_entry_continue_reverse(dev, &bus->devices, bus_list) {
5281 		if (dev->subordinate)
5282 			pci_bus_unlock(dev->subordinate);
5283 		else
5284 			pci_dev_unlock(dev);
5285 	}
5286 	pci_dev_unlock(bus->self);
5287 	return 0;
5288 }
5289 
5290 /* Do any devices on or below this slot prevent a bus reset? */
5291 static bool pci_slot_resettable(struct pci_slot *slot)
5292 {
5293 	struct pci_dev *dev;
5294 
5295 	if (slot->bus->self &&
5296 	    (slot->bus->self->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET))
5297 		return false;
5298 
5299 	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5300 		if (!dev->slot || dev->slot != slot)
5301 			continue;
5302 		if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
5303 		    (dev->subordinate && !pci_bus_resettable(dev->subordinate)))
5304 			return false;
5305 	}
5306 
5307 	return true;
5308 }
5309 
5310 /* Lock devices from the top of the tree down */
5311 static void pci_slot_lock(struct pci_slot *slot)
5312 {
5313 	struct pci_dev *dev;
5314 
5315 	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5316 		if (!dev->slot || dev->slot != slot)
5317 			continue;
5318 		if (dev->subordinate)
5319 			pci_bus_lock(dev->subordinate);
5320 		else
5321 			pci_dev_lock(dev);
5322 	}
5323 }
5324 
5325 /* Unlock devices from the bottom of the tree up */
5326 static void pci_slot_unlock(struct pci_slot *slot)
5327 {
5328 	struct pci_dev *dev;
5329 
5330 	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5331 		if (!dev->slot || dev->slot != slot)
5332 			continue;
5333 		if (dev->subordinate)
5334 			pci_bus_unlock(dev->subordinate);
5335 		else
5336 			pci_dev_unlock(dev);
5337 	}
5338 }
5339 
5340 /* Return 1 on successful lock, 0 on contention */
5341 static int pci_slot_trylock(struct pci_slot *slot)
5342 {
5343 	struct pci_dev *dev;
5344 
5345 	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5346 		if (!dev->slot || dev->slot != slot)
5347 			continue;
5348 		if (dev->subordinate) {
5349 			if (!pci_bus_trylock(dev->subordinate)) {
5350 				pci_dev_unlock(dev);
5351 				goto unlock;
5352 			}
5353 		} else if (!pci_dev_trylock(dev))
5354 			goto unlock;
5355 	}
5356 	return 1;
5357 
5358 unlock:
5359 	list_for_each_entry_continue_reverse(dev,
5360 					     &slot->bus->devices, bus_list) {
5361 		if (!dev->slot || dev->slot != slot)
5362 			continue;
5363 		if (dev->subordinate)
5364 			pci_bus_unlock(dev->subordinate);
5365 		else
5366 			pci_dev_unlock(dev);
5367 	}
5368 	return 0;
5369 }
5370 
5371 /*
5372  * Save and disable devices from the top of the tree down while holding
5373  * the @dev mutex lock for the entire tree.
5374  */
5375 static void pci_bus_save_and_disable_locked(struct pci_bus *bus)
5376 {
5377 	struct pci_dev *dev;
5378 
5379 	list_for_each_entry(dev, &bus->devices, bus_list) {
5380 		pci_dev_save_and_disable(dev);
5381 		if (dev->subordinate)
5382 			pci_bus_save_and_disable_locked(dev->subordinate);
5383 	}
5384 }
5385 
5386 /*
5387  * Restore devices from top of the tree down while holding @dev mutex lock
5388  * for the entire tree.  Parent bridges need to be restored before we can
5389  * get to subordinate devices.
5390  */
5391 static void pci_bus_restore_locked(struct pci_bus *bus)
5392 {
5393 	struct pci_dev *dev;
5394 
5395 	list_for_each_entry(dev, &bus->devices, bus_list) {
5396 		pci_dev_restore(dev);
5397 		if (dev->subordinate) {
5398 			pci_bridge_wait_for_secondary_bus(dev, "bus reset");
5399 			pci_bus_restore_locked(dev->subordinate);
5400 		}
5401 	}
5402 }
5403 
5404 /*
5405  * Save and disable devices from the top of the tree down while holding
5406  * the @dev mutex lock for the entire tree.
5407  */
5408 static void pci_slot_save_and_disable_locked(struct pci_slot *slot)
5409 {
5410 	struct pci_dev *dev;
5411 
5412 	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5413 		if (!dev->slot || dev->slot != slot)
5414 			continue;
5415 		pci_dev_save_and_disable(dev);
5416 		if (dev->subordinate)
5417 			pci_bus_save_and_disable_locked(dev->subordinate);
5418 	}
5419 }
5420 
5421 /*
5422  * Restore devices from top of the tree down while holding @dev mutex lock
5423  * for the entire tree.  Parent bridges need to be restored before we can
5424  * get to subordinate devices.
5425  */
5426 static void pci_slot_restore_locked(struct pci_slot *slot)
5427 {
5428 	struct pci_dev *dev;
5429 
5430 	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5431 		if (!dev->slot || dev->slot != slot)
5432 			continue;
5433 		pci_dev_restore(dev);
5434 		if (dev->subordinate) {
5435 			pci_bridge_wait_for_secondary_bus(dev, "slot reset");
5436 			pci_bus_restore_locked(dev->subordinate);
5437 		}
5438 	}
5439 }
5440 
5441 static int pci_slot_reset(struct pci_slot *slot, bool probe)
5442 {
5443 	int rc;
5444 
5445 	if (!slot || !pci_slot_resettable(slot))
5446 		return -ENOTTY;
5447 
5448 	if (!probe)
5449 		pci_slot_lock(slot);
5450 
5451 	might_sleep();
5452 
5453 	rc = pci_reset_hotplug_slot(slot->hotplug, probe);
5454 
5455 	if (!probe)
5456 		pci_slot_unlock(slot);
5457 
5458 	return rc;
5459 }
5460 
5461 /**
5462  * pci_probe_reset_slot - probe whether a PCI slot can be reset
5463  * @slot: PCI slot to probe
5464  *
5465  * Return 0 if slot can be reset, negative if a slot reset is not supported.
5466  */
5467 int pci_probe_reset_slot(struct pci_slot *slot)
5468 {
5469 	return pci_slot_reset(slot, PCI_RESET_PROBE);
5470 }
5471 EXPORT_SYMBOL_GPL(pci_probe_reset_slot);
5472 
5473 /**
5474  * __pci_reset_slot - Try to reset a PCI slot
5475  * @slot: PCI slot to reset
5476  *
5477  * A PCI bus may host multiple slots, each slot may support a reset mechanism
5478  * independent of other slots.  For instance, some slots may support slot power
5479  * control.  In the case of a 1:1 bus to slot architecture, this function may
5480  * wrap the bus reset to avoid spurious slot related events such as hotplug.
5481  * Generally a slot reset should be attempted before a bus reset.  All of the
5482  * function of the slot and any subordinate buses behind the slot are reset
5483  * through this function.  PCI config space of all devices in the slot and
5484  * behind the slot is saved before and restored after reset.
5485  *
5486  * Same as above except return -EAGAIN if the slot cannot be locked
5487  */
5488 static int __pci_reset_slot(struct pci_slot *slot)
5489 {
5490 	int rc;
5491 
5492 	rc = pci_slot_reset(slot, PCI_RESET_PROBE);
5493 	if (rc)
5494 		return rc;
5495 
5496 	if (pci_slot_trylock(slot)) {
5497 		pci_slot_save_and_disable_locked(slot);
5498 		might_sleep();
5499 		rc = pci_reset_hotplug_slot(slot->hotplug, PCI_RESET_DO_RESET);
5500 		pci_slot_restore_locked(slot);
5501 		pci_slot_unlock(slot);
5502 	} else
5503 		rc = -EAGAIN;
5504 
5505 	return rc;
5506 }
5507 
5508 static int pci_bus_reset(struct pci_bus *bus, bool probe)
5509 {
5510 	int ret;
5511 
5512 	if (!bus->self || !pci_bus_resettable(bus))
5513 		return -ENOTTY;
5514 
5515 	if (probe)
5516 		return 0;
5517 
5518 	pci_bus_lock(bus);
5519 
5520 	might_sleep();
5521 
5522 	ret = pci_bridge_secondary_bus_reset(bus->self);
5523 
5524 	pci_bus_unlock(bus);
5525 
5526 	return ret;
5527 }
5528 
5529 /**
5530  * pci_bus_error_reset - reset the bridge's subordinate bus
5531  * @bridge: The parent device that connects to the bus to reset
5532  *
5533  * This function will first try to reset the slots on this bus if the method is
5534  * available. If slot reset fails or is not available, this will fall back to a
5535  * secondary bus reset.
5536  */
5537 int pci_bus_error_reset(struct pci_dev *bridge)
5538 {
5539 	struct pci_bus *bus = bridge->subordinate;
5540 	struct pci_slot *slot;
5541 
5542 	if (!bus)
5543 		return -ENOTTY;
5544 
5545 	mutex_lock(&pci_slot_mutex);
5546 	if (list_empty(&bus->slots))
5547 		goto bus_reset;
5548 
5549 	list_for_each_entry(slot, &bus->slots, list)
5550 		if (pci_probe_reset_slot(slot))
5551 			goto bus_reset;
5552 
5553 	list_for_each_entry(slot, &bus->slots, list)
5554 		if (pci_slot_reset(slot, PCI_RESET_DO_RESET))
5555 			goto bus_reset;
5556 
5557 	mutex_unlock(&pci_slot_mutex);
5558 	return 0;
5559 bus_reset:
5560 	mutex_unlock(&pci_slot_mutex);
5561 	return pci_bus_reset(bridge->subordinate, PCI_RESET_DO_RESET);
5562 }
5563 
5564 /**
5565  * pci_probe_reset_bus - probe whether a PCI bus can be reset
5566  * @bus: PCI bus to probe
5567  *
5568  * Return 0 if bus can be reset, negative if a bus reset is not supported.
5569  */
5570 int pci_probe_reset_bus(struct pci_bus *bus)
5571 {
5572 	return pci_bus_reset(bus, PCI_RESET_PROBE);
5573 }
5574 EXPORT_SYMBOL_GPL(pci_probe_reset_bus);
5575 
5576 /**
5577  * __pci_reset_bus - Try to reset a PCI bus
5578  * @bus: top level PCI bus to reset
5579  *
5580  * Same as above except return -EAGAIN if the bus cannot be locked
5581  */
5582 int __pci_reset_bus(struct pci_bus *bus)
5583 {
5584 	int rc;
5585 
5586 	rc = pci_bus_reset(bus, PCI_RESET_PROBE);
5587 	if (rc)
5588 		return rc;
5589 
5590 	if (pci_bus_trylock(bus)) {
5591 		pci_bus_save_and_disable_locked(bus);
5592 		might_sleep();
5593 		rc = pci_bridge_secondary_bus_reset(bus->self);
5594 		pci_bus_restore_locked(bus);
5595 		pci_bus_unlock(bus);
5596 	} else
5597 		rc = -EAGAIN;
5598 
5599 	return rc;
5600 }
5601 
5602 /**
5603  * pci_reset_bus - Try to reset a PCI bus
5604  * @pdev: top level PCI device to reset via slot/bus
5605  *
5606  * Same as above except return -EAGAIN if the bus cannot be locked
5607  */
5608 int pci_reset_bus(struct pci_dev *pdev)
5609 {
5610 	return (!pci_probe_reset_slot(pdev->slot)) ?
5611 	    __pci_reset_slot(pdev->slot) : __pci_reset_bus(pdev->bus);
5612 }
5613 EXPORT_SYMBOL_GPL(pci_reset_bus);
5614 
5615 /**
5616  * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count
5617  * @dev: PCI device to query
5618  *
5619  * Returns mmrbc: maximum designed memory read count in bytes or
5620  * appropriate error value.
5621  */
5622 int pcix_get_max_mmrbc(struct pci_dev *dev)
5623 {
5624 	int cap;
5625 	u32 stat;
5626 
5627 	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
5628 	if (!cap)
5629 		return -EINVAL;
5630 
5631 	if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
5632 		return -EINVAL;
5633 
5634 	return 512 << FIELD_GET(PCI_X_STATUS_MAX_READ, stat);
5635 }
5636 EXPORT_SYMBOL(pcix_get_max_mmrbc);
5637 
5638 /**
5639  * pcix_get_mmrbc - get PCI-X maximum memory read byte count
5640  * @dev: PCI device to query
5641  *
5642  * Returns mmrbc: maximum memory read count in bytes or appropriate error
5643  * value.
5644  */
5645 int pcix_get_mmrbc(struct pci_dev *dev)
5646 {
5647 	int cap;
5648 	u16 cmd;
5649 
5650 	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
5651 	if (!cap)
5652 		return -EINVAL;
5653 
5654 	if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
5655 		return -EINVAL;
5656 
5657 	return 512 << FIELD_GET(PCI_X_CMD_MAX_READ, cmd);
5658 }
5659 EXPORT_SYMBOL(pcix_get_mmrbc);
5660 
5661 /**
5662  * pcix_set_mmrbc - set PCI-X maximum memory read byte count
5663  * @dev: PCI device to query
5664  * @mmrbc: maximum memory read count in bytes
5665  *    valid values are 512, 1024, 2048, 4096
5666  *
5667  * If possible sets maximum memory read byte count, some bridges have errata
5668  * that prevent this.
5669  */
5670 int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
5671 {
5672 	int cap;
5673 	u32 stat, v, o;
5674 	u16 cmd;
5675 
5676 	if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc))
5677 		return -EINVAL;
5678 
5679 	v = ffs(mmrbc) - 10;
5680 
5681 	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
5682 	if (!cap)
5683 		return -EINVAL;
5684 
5685 	if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
5686 		return -EINVAL;
5687 
5688 	if (v > FIELD_GET(PCI_X_STATUS_MAX_READ, stat))
5689 		return -E2BIG;
5690 
5691 	if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
5692 		return -EINVAL;
5693 
5694 	o = FIELD_GET(PCI_X_CMD_MAX_READ, cmd);
5695 	if (o != v) {
5696 		if (v > o && (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC))
5697 			return -EIO;
5698 
5699 		cmd &= ~PCI_X_CMD_MAX_READ;
5700 		cmd |= FIELD_PREP(PCI_X_CMD_MAX_READ, v);
5701 		if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd))
5702 			return -EIO;
5703 	}
5704 	return 0;
5705 }
5706 EXPORT_SYMBOL(pcix_set_mmrbc);
5707 
5708 /**
5709  * pcie_get_readrq - get PCI Express read request size
5710  * @dev: PCI device to query
5711  *
5712  * Returns maximum memory read request in bytes or appropriate error value.
5713  */
5714 int pcie_get_readrq(struct pci_dev *dev)
5715 {
5716 	u16 ctl;
5717 
5718 	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
5719 
5720 	return 128 << FIELD_GET(PCI_EXP_DEVCTL_READRQ, ctl);
5721 }
5722 EXPORT_SYMBOL(pcie_get_readrq);
5723 
5724 /**
5725  * pcie_set_readrq - set PCI Express maximum memory read request
5726  * @dev: PCI device to query
5727  * @rq: maximum memory read count in bytes
5728  *    valid values are 128, 256, 512, 1024, 2048, 4096
5729  *
5730  * If possible sets maximum memory read request in bytes
5731  */
5732 int pcie_set_readrq(struct pci_dev *dev, int rq)
5733 {
5734 	u16 v;
5735 	int ret;
5736 	unsigned int firstbit;
5737 	struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);
5738 
5739 	if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
5740 		return -EINVAL;
5741 
5742 	/*
5743 	 * If using the "performance" PCIe config, we clamp the read rq
5744 	 * size to the max packet size to keep the host bridge from
5745 	 * generating requests larger than we can cope with.
5746 	 */
5747 	if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
5748 		int mps = pcie_get_mps(dev);
5749 
5750 		if (mps < rq)
5751 			rq = mps;
5752 	}
5753 
5754 	firstbit = ffs(rq);
5755 	if (firstbit < 8)
5756 		return -EINVAL;
5757 	v = FIELD_PREP(PCI_EXP_DEVCTL_READRQ, firstbit - 8);
5758 
5759 	if (bridge->no_inc_mrrs) {
5760 		int max_mrrs = pcie_get_readrq(dev);
5761 
5762 		if (rq > max_mrrs) {
5763 			pci_info(dev, "can't set Max_Read_Request_Size to %d; max is %d\n", rq, max_mrrs);
5764 			return -EINVAL;
5765 		}
5766 	}
5767 
5768 	ret = pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
5769 						  PCI_EXP_DEVCTL_READRQ, v);
5770 
5771 	return pcibios_err_to_errno(ret);
5772 }
5773 EXPORT_SYMBOL(pcie_set_readrq);
5774 
5775 /**
5776  * pcie_get_mps - get PCI Express maximum payload size
5777  * @dev: PCI device to query
5778  *
5779  * Returns maximum payload size in bytes
5780  */
5781 int pcie_get_mps(struct pci_dev *dev)
5782 {
5783 	u16 ctl;
5784 
5785 	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
5786 
5787 	return 128 << FIELD_GET(PCI_EXP_DEVCTL_PAYLOAD, ctl);
5788 }
5789 EXPORT_SYMBOL(pcie_get_mps);
5790 
5791 /**
5792  * pcie_set_mps - set PCI Express maximum payload size
5793  * @dev: PCI device to query
5794  * @mps: maximum payload size in bytes
5795  *    valid values are 128, 256, 512, 1024, 2048, 4096
5796  *
5797  * If possible sets maximum payload size
5798  */
5799 int pcie_set_mps(struct pci_dev *dev, int mps)
5800 {
5801 	u16 v;
5802 	int ret;
5803 
5804 	if (mps < 128 || mps > 4096 || !is_power_of_2(mps))
5805 		return -EINVAL;
5806 
5807 	v = ffs(mps) - 8;
5808 	if (v > dev->pcie_mpss)
5809 		return -EINVAL;
5810 	v = FIELD_PREP(PCI_EXP_DEVCTL_PAYLOAD, v);
5811 
5812 	ret = pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
5813 						  PCI_EXP_DEVCTL_PAYLOAD, v);
5814 
5815 	return pcibios_err_to_errno(ret);
5816 }
5817 EXPORT_SYMBOL(pcie_set_mps);
5818 
5819 static enum pci_bus_speed to_pcie_link_speed(u16 lnksta)
5820 {
5821 	return pcie_link_speed[FIELD_GET(PCI_EXP_LNKSTA_CLS, lnksta)];
5822 }
5823 
5824 int pcie_link_speed_mbps(struct pci_dev *pdev)
5825 {
5826 	u16 lnksta;
5827 	int err;
5828 
5829 	err = pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnksta);
5830 	if (err)
5831 		return err;
5832 
5833 	return pcie_dev_speed_mbps(to_pcie_link_speed(lnksta));
5834 }
5835 EXPORT_SYMBOL(pcie_link_speed_mbps);
5836 
5837 /**
5838  * pcie_bandwidth_available - determine minimum link settings of a PCIe
5839  *			      device and its bandwidth limitation
5840  * @dev: PCI device to query
5841  * @limiting_dev: storage for device causing the bandwidth limitation
5842  * @speed: storage for speed of limiting device
5843  * @width: storage for width of limiting device
5844  *
5845  * Walk up the PCI device chain and find the point where the minimum
5846  * bandwidth is available.  Return the bandwidth available there and (if
5847  * limiting_dev, speed, and width pointers are supplied) information about
5848  * that point.  The bandwidth returned is in Mb/s, i.e., megabits/second of
5849  * raw bandwidth.
5850  */
5851 u32 pcie_bandwidth_available(struct pci_dev *dev, struct pci_dev **limiting_dev,
5852 			     enum pci_bus_speed *speed,
5853 			     enum pcie_link_width *width)
5854 {
5855 	u16 lnksta;
5856 	enum pci_bus_speed next_speed;
5857 	enum pcie_link_width next_width;
5858 	u32 bw, next_bw;
5859 
5860 	if (speed)
5861 		*speed = PCI_SPEED_UNKNOWN;
5862 	if (width)
5863 		*width = PCIE_LNK_WIDTH_UNKNOWN;
5864 
5865 	bw = 0;
5866 
5867 	while (dev) {
5868 		pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
5869 
5870 		next_speed = to_pcie_link_speed(lnksta);
5871 		next_width = FIELD_GET(PCI_EXP_LNKSTA_NLW, lnksta);
5872 
5873 		next_bw = next_width * PCIE_SPEED2MBS_ENC(next_speed);
5874 
5875 		/* Check if current device limits the total bandwidth */
5876 		if (!bw || next_bw <= bw) {
5877 			bw = next_bw;
5878 
5879 			if (limiting_dev)
5880 				*limiting_dev = dev;
5881 			if (speed)
5882 				*speed = next_speed;
5883 			if (width)
5884 				*width = next_width;
5885 		}
5886 
5887 		dev = pci_upstream_bridge(dev);
5888 	}
5889 
5890 	return bw;
5891 }
5892 EXPORT_SYMBOL(pcie_bandwidth_available);
5893 
5894 /**
5895  * pcie_get_supported_speeds - query Supported Link Speed Vector
5896  * @dev: PCI device to query
5897  *
5898  * Query @dev supported link speeds.
5899  *
5900  * Implementation Note in PCIe r6.0 sec 7.5.3.18 recommends determining
5901  * supported link speeds using the Supported Link Speeds Vector in the Link
5902  * Capabilities 2 Register (when available).
5903  *
5904  * Link Capabilities 2 was added in PCIe r3.0, sec 7.8.18.
5905  *
5906  * Without Link Capabilities 2, i.e., prior to PCIe r3.0, Supported Link
5907  * Speeds field in Link Capabilities is used and only 2.5 GT/s and 5.0 GT/s
5908  * speeds were defined.
5909  *
5910  * For @dev without Supported Link Speed Vector, the field is synthesized
5911  * from the Max Link Speed field in the Link Capabilities Register.
5912  *
5913  * Return: Supported Link Speeds Vector (+ reserved 0 at LSB).
5914  */
5915 u8 pcie_get_supported_speeds(struct pci_dev *dev)
5916 {
5917 	u32 lnkcap2, lnkcap;
5918 	u8 speeds;
5919 
5920 	/*
5921 	 * Speeds retain the reserved 0 at LSB before PCIe Supported Link
5922 	 * Speeds Vector to allow using SLS Vector bit defines directly.
5923 	 */
5924 	pcie_capability_read_dword(dev, PCI_EXP_LNKCAP2, &lnkcap2);
5925 	speeds = lnkcap2 & PCI_EXP_LNKCAP2_SLS;
5926 
5927 	/* Ignore speeds higher than Max Link Speed */
5928 	pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
5929 	speeds &= GENMASK(lnkcap & PCI_EXP_LNKCAP_SLS, 0);
5930 
5931 	/* PCIe r3.0-compliant */
5932 	if (speeds)
5933 		return speeds;
5934 
5935 	/* Synthesize from the Max Link Speed field */
5936 	if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_5_0GB)
5937 		speeds = PCI_EXP_LNKCAP2_SLS_5_0GB | PCI_EXP_LNKCAP2_SLS_2_5GB;
5938 	else if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_2_5GB)
5939 		speeds = PCI_EXP_LNKCAP2_SLS_2_5GB;
5940 
5941 	return speeds;
5942 }
5943 
5944 /**
5945  * pcie_get_speed_cap - query for the PCI device's link speed capability
5946  * @dev: PCI device to query
5947  *
5948  * Query the PCI device speed capability.
5949  *
5950  * Return: the maximum link speed supported by the device.
5951  */
5952 enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev)
5953 {
5954 	return PCIE_LNKCAP2_SLS2SPEED(dev->supported_speeds);
5955 }
5956 EXPORT_SYMBOL(pcie_get_speed_cap);
5957 
5958 /**
5959  * pcie_get_width_cap - query for the PCI device's link width capability
5960  * @dev: PCI device to query
5961  *
5962  * Query the PCI device width capability.  Return the maximum link width
5963  * supported by the device.
5964  */
5965 enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev)
5966 {
5967 	u32 lnkcap;
5968 
5969 	pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
5970 	if (lnkcap)
5971 		return FIELD_GET(PCI_EXP_LNKCAP_MLW, lnkcap);
5972 
5973 	return PCIE_LNK_WIDTH_UNKNOWN;
5974 }
5975 EXPORT_SYMBOL(pcie_get_width_cap);
5976 
5977 /**
5978  * pcie_bandwidth_capable - calculate a PCI device's link bandwidth capability
5979  * @dev: PCI device
5980  * @speed: storage for link speed
5981  * @width: storage for link width
5982  *
5983  * Calculate a PCI device's link bandwidth by querying for its link speed
5984  * and width, multiplying them, and applying encoding overhead.  The result
5985  * is in Mb/s, i.e., megabits/second of raw bandwidth.
5986  */
5987 static u32 pcie_bandwidth_capable(struct pci_dev *dev,
5988 				  enum pci_bus_speed *speed,
5989 				  enum pcie_link_width *width)
5990 {
5991 	*speed = pcie_get_speed_cap(dev);
5992 	*width = pcie_get_width_cap(dev);
5993 
5994 	if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN)
5995 		return 0;
5996 
5997 	return *width * PCIE_SPEED2MBS_ENC(*speed);
5998 }
5999 
6000 /**
6001  * __pcie_print_link_status - Report the PCI device's link speed and width
6002  * @dev: PCI device to query
6003  * @verbose: Print info even when enough bandwidth is available
6004  *
6005  * If the available bandwidth at the device is less than the device is
6006  * capable of, report the device's maximum possible bandwidth and the
6007  * upstream link that limits its performance.  If @verbose, always print
6008  * the available bandwidth, even if the device isn't constrained.
6009  */
6010 void __pcie_print_link_status(struct pci_dev *dev, bool verbose)
6011 {
6012 	enum pcie_link_width width, width_cap;
6013 	enum pci_bus_speed speed, speed_cap;
6014 	struct pci_dev *limiting_dev = NULL;
6015 	u32 bw_avail, bw_cap;
6016 	char *flit_mode = "";
6017 
6018 	bw_cap = pcie_bandwidth_capable(dev, &speed_cap, &width_cap);
6019 	bw_avail = pcie_bandwidth_available(dev, &limiting_dev, &speed, &width);
6020 
6021 	if (dev->bus && dev->bus->flit_mode)
6022 		flit_mode = ", in Flit mode";
6023 
6024 	if (bw_avail >= bw_cap && verbose)
6025 		pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth (%s x%d link)%s\n",
6026 			 bw_cap / 1000, bw_cap % 1000,
6027 			 pci_speed_string(speed_cap), width_cap, flit_mode);
6028 	else if (bw_avail < bw_cap)
6029 		pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth, limited by %s x%d link at %s (capable of %u.%03u Gb/s with %s x%d link)%s\n",
6030 			 bw_avail / 1000, bw_avail % 1000,
6031 			 pci_speed_string(speed), width,
6032 			 limiting_dev ? pci_name(limiting_dev) : "<unknown>",
6033 			 bw_cap / 1000, bw_cap % 1000,
6034 			 pci_speed_string(speed_cap), width_cap, flit_mode);
6035 }
6036 
6037 /**
6038  * pcie_print_link_status - Report the PCI device's link speed and width
6039  * @dev: PCI device to query
6040  *
6041  * Report the available bandwidth at the device.
6042  */
6043 void pcie_print_link_status(struct pci_dev *dev)
6044 {
6045 	__pcie_print_link_status(dev, true);
6046 }
6047 EXPORT_SYMBOL(pcie_print_link_status);
6048 
6049 /**
6050  * pci_select_bars - Make BAR mask from the type of resource
6051  * @dev: the PCI device for which BAR mask is made
6052  * @flags: resource type mask to be selected
6053  *
6054  * This helper routine makes bar mask from the type of resource.
6055  */
6056 int pci_select_bars(struct pci_dev *dev, unsigned long flags)
6057 {
6058 	int i, bars = 0;
6059 	for (i = 0; i < PCI_NUM_RESOURCES; i++)
6060 		if (pci_resource_flags(dev, i) & flags)
6061 			bars |= (1 << i);
6062 	return bars;
6063 }
6064 EXPORT_SYMBOL(pci_select_bars);
6065 
6066 /* Some architectures require additional programming to enable VGA */
6067 static arch_set_vga_state_t arch_set_vga_state;
6068 
6069 void __init pci_register_set_vga_state(arch_set_vga_state_t func)
6070 {
6071 	arch_set_vga_state = func;	/* NULL disables */
6072 }
6073 
6074 static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
6075 				  unsigned int command_bits, u32 flags)
6076 {
6077 	if (arch_set_vga_state)
6078 		return arch_set_vga_state(dev, decode, command_bits,
6079 						flags);
6080 	return 0;
6081 }
6082 
6083 /**
6084  * pci_set_vga_state - set VGA decode state on device and parents if requested
6085  * @dev: the PCI device
6086  * @decode: true = enable decoding, false = disable decoding
6087  * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY
6088  * @flags: traverse ancestors and change bridges
6089  * CHANGE_BRIDGE_ONLY / CHANGE_BRIDGE
6090  */
6091 int pci_set_vga_state(struct pci_dev *dev, bool decode,
6092 		      unsigned int command_bits, u32 flags)
6093 {
6094 	struct pci_bus *bus;
6095 	struct pci_dev *bridge;
6096 	u16 cmd;
6097 	int rc;
6098 
6099 	WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) && (command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY)));
6100 
6101 	/* ARCH specific VGA enables */
6102 	rc = pci_set_vga_state_arch(dev, decode, command_bits, flags);
6103 	if (rc)
6104 		return rc;
6105 
6106 	if (flags & PCI_VGA_STATE_CHANGE_DECODES) {
6107 		pci_read_config_word(dev, PCI_COMMAND, &cmd);
6108 		if (decode)
6109 			cmd |= command_bits;
6110 		else
6111 			cmd &= ~command_bits;
6112 		pci_write_config_word(dev, PCI_COMMAND, cmd);
6113 	}
6114 
6115 	if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE))
6116 		return 0;
6117 
6118 	bus = dev->bus;
6119 	while (bus) {
6120 		bridge = bus->self;
6121 		if (bridge) {
6122 			pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
6123 					     &cmd);
6124 			if (decode)
6125 				cmd |= PCI_BRIDGE_CTL_VGA;
6126 			else
6127 				cmd &= ~PCI_BRIDGE_CTL_VGA;
6128 			pci_write_config_word(bridge, PCI_BRIDGE_CONTROL,
6129 					      cmd);
6130 		}
6131 		bus = bus->parent;
6132 	}
6133 	return 0;
6134 }
6135 
6136 #ifdef CONFIG_ACPI
6137 bool pci_pr3_present(struct pci_dev *pdev)
6138 {
6139 	struct acpi_device *adev;
6140 
6141 	if (acpi_disabled)
6142 		return false;
6143 
6144 	adev = ACPI_COMPANION(&pdev->dev);
6145 	if (!adev)
6146 		return false;
6147 
6148 	return adev->power.flags.power_resources &&
6149 		acpi_has_method(adev->handle, "_PR3");
6150 }
6151 EXPORT_SYMBOL_GPL(pci_pr3_present);
6152 #endif
6153 
6154 /**
6155  * pci_add_dma_alias - Add a DMA devfn alias for a device
6156  * @dev: the PCI device for which alias is added
6157  * @devfn_from: alias slot and function
6158  * @nr_devfns: number of subsequent devfns to alias
6159  *
6160  * This helper encodes an 8-bit devfn as a bit number in dma_alias_mask
6161  * which is used to program permissible bus-devfn source addresses for DMA
6162  * requests in an IOMMU.  These aliases factor into IOMMU group creation
6163  * and are useful for devices generating DMA requests beyond or different
6164  * from their logical bus-devfn.  Examples include device quirks where the
6165  * device simply uses the wrong devfn, as well as non-transparent bridges
6166  * where the alias may be a proxy for devices in another domain.
6167  *
6168  * IOMMU group creation is performed during device discovery or addition,
6169  * prior to any potential DMA mapping and therefore prior to driver probing
6170  * (especially for userspace assigned devices where IOMMU group definition
6171  * cannot be left as a userspace activity).  DMA aliases should therefore
6172  * be configured via quirks, such as the PCI fixup header quirk.
6173  */
6174 void pci_add_dma_alias(struct pci_dev *dev, u8 devfn_from,
6175 		       unsigned int nr_devfns)
6176 {
6177 	int devfn_to;
6178 
6179 	nr_devfns = min(nr_devfns, (unsigned int)MAX_NR_DEVFNS - devfn_from);
6180 	devfn_to = devfn_from + nr_devfns - 1;
6181 
6182 	if (!dev->dma_alias_mask)
6183 		dev->dma_alias_mask = bitmap_zalloc(MAX_NR_DEVFNS, GFP_KERNEL);
6184 	if (!dev->dma_alias_mask) {
6185 		pci_warn(dev, "Unable to allocate DMA alias mask\n");
6186 		return;
6187 	}
6188 
6189 	bitmap_set(dev->dma_alias_mask, devfn_from, nr_devfns);
6190 
6191 	if (nr_devfns == 1)
6192 		pci_info(dev, "Enabling fixed DMA alias to %02x.%d\n",
6193 				PCI_SLOT(devfn_from), PCI_FUNC(devfn_from));
6194 	else if (nr_devfns > 1)
6195 		pci_info(dev, "Enabling fixed DMA alias for devfn range from %02x.%d to %02x.%d\n",
6196 				PCI_SLOT(devfn_from), PCI_FUNC(devfn_from),
6197 				PCI_SLOT(devfn_to), PCI_FUNC(devfn_to));
6198 }
6199 
6200 bool pci_devs_are_dma_aliases(struct pci_dev *dev1, struct pci_dev *dev2)
6201 {
6202 	return (dev1->dma_alias_mask &&
6203 		test_bit(dev2->devfn, dev1->dma_alias_mask)) ||
6204 	       (dev2->dma_alias_mask &&
6205 		test_bit(dev1->devfn, dev2->dma_alias_mask)) ||
6206 	       pci_real_dma_dev(dev1) == dev2 ||
6207 	       pci_real_dma_dev(dev2) == dev1;
6208 }
6209 
6210 bool pci_device_is_present(struct pci_dev *pdev)
6211 {
6212 	u32 v;
6213 
6214 	/* Check PF if pdev is a VF, since VF Vendor/Device IDs are 0xffff */
6215 	pdev = pci_physfn(pdev);
6216 	if (pci_dev_is_disconnected(pdev))
6217 		return false;
6218 	return pci_bus_read_dev_vendor_id(pdev->bus, pdev->devfn, &v, 0);
6219 }
6220 EXPORT_SYMBOL_GPL(pci_device_is_present);
6221 
6222 void pci_ignore_hotplug(struct pci_dev *dev)
6223 {
6224 	struct pci_dev *bridge = dev->bus->self;
6225 
6226 	dev->ignore_hotplug = 1;
6227 	/* Propagate the "ignore hotplug" setting to the parent bridge. */
6228 	if (bridge)
6229 		bridge->ignore_hotplug = 1;
6230 }
6231 EXPORT_SYMBOL_GPL(pci_ignore_hotplug);
6232 
6233 /**
6234  * pci_real_dma_dev - Get PCI DMA device for PCI device
6235  * @dev: the PCI device that may have a PCI DMA alias
6236  *
6237  * Permits the platform to provide architecture-specific functionality to
6238  * devices needing to alias DMA to another PCI device on another PCI bus. If
6239  * the PCI device is on the same bus, it is recommended to use
6240  * pci_add_dma_alias(). This is the default implementation. Architecture
6241  * implementations can override this.
6242  */
6243 struct pci_dev __weak *pci_real_dma_dev(struct pci_dev *dev)
6244 {
6245 	return dev;
6246 }
6247 
6248 resource_size_t __weak pcibios_default_alignment(void)
6249 {
6250 	return 0;
6251 }
6252 
6253 /*
6254  * Arches that don't want to expose struct resource to userland as-is in
6255  * sysfs and /proc can implement their own pci_resource_to_user().
6256  */
6257 void __weak pci_resource_to_user(const struct pci_dev *dev, int bar,
6258 				 const struct resource *rsrc,
6259 				 resource_size_t *start, resource_size_t *end)
6260 {
6261 	*start = rsrc->start;
6262 	*end = rsrc->end;
6263 }
6264 
6265 static char *resource_alignment_param;
6266 static DEFINE_SPINLOCK(resource_alignment_lock);
6267 
6268 /**
6269  * pci_specified_resource_alignment - get resource alignment specified by user.
6270  * @dev: the PCI device to get
6271  * @resize: whether or not to change resources' size when reassigning alignment
6272  *
6273  * RETURNS: Resource alignment if it is specified.
6274  *          Zero if it is not specified.
6275  */
6276 static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev,
6277 							bool *resize)
6278 {
6279 	int align_order, count;
6280 	resource_size_t align = pcibios_default_alignment();
6281 	const char *p;
6282 	int ret;
6283 
6284 	spin_lock(&resource_alignment_lock);
6285 	p = resource_alignment_param;
6286 	if (!p || !*p)
6287 		goto out;
6288 	if (pci_has_flag(PCI_PROBE_ONLY)) {
6289 		align = 0;
6290 		pr_info_once("PCI: Ignoring requested alignments (PCI_PROBE_ONLY)\n");
6291 		goto out;
6292 	}
6293 
6294 	while (*p) {
6295 		count = 0;
6296 		if (sscanf(p, "%d%n", &align_order, &count) == 1 &&
6297 		    p[count] == '@') {
6298 			p += count + 1;
6299 			if (align_order > 63) {
6300 				pr_err("PCI: Invalid requested alignment (order %d)\n",
6301 				       align_order);
6302 				align_order = PAGE_SHIFT;
6303 			}
6304 		} else {
6305 			align_order = PAGE_SHIFT;
6306 		}
6307 
6308 		ret = pci_dev_str_match(dev, p, &p);
6309 		if (ret == 1) {
6310 			*resize = true;
6311 			align = 1ULL << align_order;
6312 			break;
6313 		} else if (ret < 0) {
6314 			pr_err("PCI: Can't parse resource_alignment parameter: %s\n",
6315 			       p);
6316 			break;
6317 		}
6318 
6319 		if (*p != ';' && *p != ',') {
6320 			/* End of param or invalid format */
6321 			break;
6322 		}
6323 		p++;
6324 	}
6325 out:
6326 	spin_unlock(&resource_alignment_lock);
6327 	return align;
6328 }
6329 
6330 static void pci_request_resource_alignment(struct pci_dev *dev, int bar,
6331 					   resource_size_t align, bool resize)
6332 {
6333 	struct resource *r = &dev->resource[bar];
6334 	const char *r_name = pci_resource_name(dev, bar);
6335 	resource_size_t size;
6336 
6337 	if (!(r->flags & IORESOURCE_MEM))
6338 		return;
6339 
6340 	if (r->flags & IORESOURCE_PCI_FIXED) {
6341 		pci_info(dev, "%s %pR: ignoring requested alignment %#llx\n",
6342 			 r_name, r, (unsigned long long)align);
6343 		return;
6344 	}
6345 
6346 	size = resource_size(r);
6347 	if (size >= align)
6348 		return;
6349 
6350 	/*
6351 	 * Increase the alignment of the resource.  There are two ways we
6352 	 * can do this:
6353 	 *
6354 	 * 1) Increase the size of the resource.  BARs are aligned on their
6355 	 *    size, so when we reallocate space for this resource, we'll
6356 	 *    allocate it with the larger alignment.  This also prevents
6357 	 *    assignment of any other BARs inside the alignment region, so
6358 	 *    if we're requesting page alignment, this means no other BARs
6359 	 *    will share the page.
6360 	 *
6361 	 *    The disadvantage is that this makes the resource larger than
6362 	 *    the hardware BAR, which may break drivers that compute things
6363 	 *    based on the resource size, e.g., to find registers at a
6364 	 *    fixed offset before the end of the BAR.
6365 	 *
6366 	 * 2) Retain the resource size, but use IORESOURCE_STARTALIGN and
6367 	 *    set r->start to the desired alignment.  By itself this
6368 	 *    doesn't prevent other BARs being put inside the alignment
6369 	 *    region, but if we realign *every* resource of every device in
6370 	 *    the system, none of them will share an alignment region.
6371 	 *
6372 	 * When the user has requested alignment for only some devices via
6373 	 * the "pci=resource_alignment" argument, "resize" is true and we
6374 	 * use the first method.  Otherwise we assume we're aligning all
6375 	 * devices and we use the second.
6376 	 */
6377 
6378 	pci_info(dev, "%s %pR: requesting alignment to %#llx\n",
6379 		 r_name, r, (unsigned long long)align);
6380 
6381 	if (resize) {
6382 		r->start = 0;
6383 		r->end = align - 1;
6384 	} else {
6385 		r->flags &= ~IORESOURCE_SIZEALIGN;
6386 		r->flags |= IORESOURCE_STARTALIGN;
6387 		resource_set_range(r, align, size);
6388 	}
6389 	r->flags |= IORESOURCE_UNSET;
6390 }
6391 
6392 /*
6393  * This function disables memory decoding and releases memory resources
6394  * of the device specified by kernel's boot parameter 'pci=resource_alignment='.
6395  * It also rounds up size to specified alignment.
6396  * Later on, the kernel will assign page-aligned memory resource back
6397  * to the device.
6398  */
6399 void pci_reassigndev_resource_alignment(struct pci_dev *dev)
6400 {
6401 	int i;
6402 	struct resource *r;
6403 	resource_size_t align;
6404 	u16 command;
6405 	bool resize = false;
6406 
6407 	/*
6408 	 * VF BARs are read-only zero according to SR-IOV spec r1.1, sec
6409 	 * 3.4.1.11.  Their resources are allocated from the space
6410 	 * described by the VF BARx register in the PF's SR-IOV capability.
6411 	 * We can't influence their alignment here.
6412 	 */
6413 	if (dev->is_virtfn)
6414 		return;
6415 
6416 	/* check if specified PCI is target device to reassign */
6417 	align = pci_specified_resource_alignment(dev, &resize);
6418 	if (!align)
6419 		return;
6420 
6421 	if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
6422 	    (dev->class >> 8) == PCI_CLASS_BRIDGE_HOST) {
6423 		pci_warn(dev, "Can't reassign resources to host bridge\n");
6424 		return;
6425 	}
6426 
6427 	pci_read_config_word(dev, PCI_COMMAND, &command);
6428 	command &= ~PCI_COMMAND_MEMORY;
6429 	pci_write_config_word(dev, PCI_COMMAND, command);
6430 
6431 	for (i = 0; i <= PCI_ROM_RESOURCE; i++)
6432 		pci_request_resource_alignment(dev, i, align, resize);
6433 
6434 	/*
6435 	 * Need to disable bridge's resource window,
6436 	 * to enable the kernel to reassign new resource
6437 	 * window later on.
6438 	 */
6439 	if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
6440 		for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
6441 			r = &dev->resource[i];
6442 			if (!(r->flags & IORESOURCE_MEM))
6443 				continue;
6444 			r->flags |= IORESOURCE_UNSET;
6445 			r->end = resource_size(r) - 1;
6446 			r->start = 0;
6447 		}
6448 		pci_disable_bridge_window(dev);
6449 	}
6450 }
6451 
6452 static ssize_t resource_alignment_show(const struct bus_type *bus, char *buf)
6453 {
6454 	size_t count = 0;
6455 
6456 	spin_lock(&resource_alignment_lock);
6457 	if (resource_alignment_param)
6458 		count = sysfs_emit(buf, "%s\n", resource_alignment_param);
6459 	spin_unlock(&resource_alignment_lock);
6460 
6461 	return count;
6462 }
6463 
6464 static ssize_t resource_alignment_store(const struct bus_type *bus,
6465 					const char *buf, size_t count)
6466 {
6467 	char *param, *old, *end;
6468 
6469 	if (count >= (PAGE_SIZE - 1))
6470 		return -EINVAL;
6471 
6472 	param = kstrndup(buf, count, GFP_KERNEL);
6473 	if (!param)
6474 		return -ENOMEM;
6475 
6476 	end = strchr(param, '\n');
6477 	if (end)
6478 		*end = '\0';
6479 
6480 	spin_lock(&resource_alignment_lock);
6481 	old = resource_alignment_param;
6482 	if (strlen(param)) {
6483 		resource_alignment_param = param;
6484 	} else {
6485 		kfree(param);
6486 		resource_alignment_param = NULL;
6487 	}
6488 	spin_unlock(&resource_alignment_lock);
6489 
6490 	kfree(old);
6491 
6492 	return count;
6493 }
6494 
6495 static BUS_ATTR_RW(resource_alignment);
6496 
6497 static int __init pci_resource_alignment_sysfs_init(void)
6498 {
6499 	return bus_create_file(&pci_bus_type,
6500 					&bus_attr_resource_alignment);
6501 }
6502 late_initcall(pci_resource_alignment_sysfs_init);
6503 
6504 static void pci_no_domains(void)
6505 {
6506 #ifdef CONFIG_PCI_DOMAINS
6507 	pci_domains_supported = 0;
6508 #endif
6509 }
6510 
6511 #ifdef CONFIG_PCI_DOMAINS
6512 static DEFINE_IDA(pci_domain_nr_dynamic_ida);
6513 
6514 /**
6515  * pci_bus_find_emul_domain_nr() - allocate a PCI domain number per constraints
6516  * @hint: desired domain, 0 if any ID in the range of @min to @max is acceptable
6517  * @min: minimum allowable domain
6518  * @max: maximum allowable domain, no IDs higher than INT_MAX will be returned
6519  */
6520 int pci_bus_find_emul_domain_nr(u32 hint, u32 min, u32 max)
6521 {
6522 	return ida_alloc_range(&pci_domain_nr_dynamic_ida, max(hint, min), max,
6523 			       GFP_KERNEL);
6524 }
6525 EXPORT_SYMBOL_GPL(pci_bus_find_emul_domain_nr);
6526 
6527 void pci_bus_release_emul_domain_nr(int domain_nr)
6528 {
6529 	ida_free(&pci_domain_nr_dynamic_ida, domain_nr);
6530 }
6531 EXPORT_SYMBOL_GPL(pci_bus_release_emul_domain_nr);
6532 #endif
6533 
6534 #ifdef CONFIG_PCI_DOMAINS_GENERIC
6535 static DEFINE_IDA(pci_domain_nr_static_ida);
6536 
6537 static void of_pci_reserve_static_domain_nr(void)
6538 {
6539 	struct device_node *np;
6540 	int domain_nr;
6541 
6542 	for_each_node_by_type(np, "pci") {
6543 		domain_nr = of_get_pci_domain_nr(np);
6544 		if (domain_nr < 0)
6545 			continue;
6546 		/*
6547 		 * Permanently allocate domain_nr in dynamic_ida
6548 		 * to prevent it from dynamic allocation.
6549 		 */
6550 		ida_alloc_range(&pci_domain_nr_dynamic_ida,
6551 				domain_nr, domain_nr, GFP_KERNEL);
6552 	}
6553 }
6554 
6555 static int of_pci_bus_find_domain_nr(struct device *parent)
6556 {
6557 	static bool static_domains_reserved = false;
6558 	int domain_nr;
6559 
6560 	/* On the first call scan device tree for static allocations. */
6561 	if (!static_domains_reserved) {
6562 		of_pci_reserve_static_domain_nr();
6563 		static_domains_reserved = true;
6564 	}
6565 
6566 	if (parent) {
6567 		/*
6568 		 * If domain is in DT, allocate it in static IDA.  This
6569 		 * prevents duplicate static allocations in case of errors
6570 		 * in DT.
6571 		 */
6572 		domain_nr = of_get_pci_domain_nr(parent->of_node);
6573 		if (domain_nr >= 0)
6574 			return ida_alloc_range(&pci_domain_nr_static_ida,
6575 					       domain_nr, domain_nr,
6576 					       GFP_KERNEL);
6577 	}
6578 
6579 	/*
6580 	 * If domain was not specified in DT, choose a free ID from dynamic
6581 	 * allocations. All domain numbers from DT are permanently in
6582 	 * dynamic allocations to prevent assigning them to other DT nodes
6583 	 * without static domain.
6584 	 */
6585 	return ida_alloc(&pci_domain_nr_dynamic_ida, GFP_KERNEL);
6586 }
6587 
6588 static void of_pci_bus_release_domain_nr(struct device *parent, int domain_nr)
6589 {
6590 	if (domain_nr < 0)
6591 		return;
6592 
6593 	/* Release domain from IDA where it was allocated. */
6594 	if (of_get_pci_domain_nr(parent->of_node) == domain_nr)
6595 		ida_free(&pci_domain_nr_static_ida, domain_nr);
6596 	else
6597 		ida_free(&pci_domain_nr_dynamic_ida, domain_nr);
6598 }
6599 
6600 int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent)
6601 {
6602 	return acpi_disabled ? of_pci_bus_find_domain_nr(parent) :
6603 			       acpi_pci_bus_find_domain_nr(bus);
6604 }
6605 
6606 void pci_bus_release_domain_nr(struct device *parent, int domain_nr)
6607 {
6608 	if (!acpi_disabled)
6609 		return;
6610 	of_pci_bus_release_domain_nr(parent, domain_nr);
6611 }
6612 #endif
6613 
6614 /**
6615  * pci_ext_cfg_avail - can we access extended PCI config space?
6616  *
6617  * Returns 1 if we can access PCI extended config space (offsets
6618  * greater than 0xff). This is the default implementation. Architecture
6619  * implementations can override this.
6620  */
6621 int __weak pci_ext_cfg_avail(void)
6622 {
6623 	return 1;
6624 }
6625 
6626 static int __init pci_setup(char *str)
6627 {
6628 	while (str) {
6629 		char *k = strchr(str, ',');
6630 		if (k)
6631 			*k++ = 0;
6632 		if (*str && (str = pcibios_setup(str)) && *str) {
6633 			if (!strcmp(str, "nomsi")) {
6634 				pci_no_msi();
6635 			} else if (!strncmp(str, "noats", 5)) {
6636 				pr_info("PCIe: ATS is disabled\n");
6637 				pcie_ats_disabled = true;
6638 			} else if (!strcmp(str, "noaer")) {
6639 				pci_no_aer();
6640 			} else if (!strcmp(str, "earlydump")) {
6641 				pci_early_dump = true;
6642 			} else if (!strncmp(str, "realloc=", 8)) {
6643 				pci_realloc_get_opt(str + 8);
6644 			} else if (!strncmp(str, "realloc", 7)) {
6645 				pci_realloc_get_opt("on");
6646 			} else if (!strcmp(str, "nodomains")) {
6647 				pci_no_domains();
6648 			} else if (!strncmp(str, "noari", 5)) {
6649 				pcie_ari_disabled = true;
6650 			} else if (!strncmp(str, "notph", 5)) {
6651 				pci_no_tph();
6652 			} else if (!strncmp(str, "cbiosize=", 9)) {
6653 				pci_cardbus_io_size = memparse(str + 9, &str);
6654 			} else if (!strncmp(str, "cbmemsize=", 10)) {
6655 				pci_cardbus_mem_size = memparse(str + 10, &str);
6656 			} else if (!strncmp(str, "resource_alignment=", 19)) {
6657 				resource_alignment_param = str + 19;
6658 			} else if (!strncmp(str, "ecrc=", 5)) {
6659 				pcie_ecrc_get_policy(str + 5);
6660 			} else if (!strncmp(str, "hpiosize=", 9)) {
6661 				pci_hotplug_io_size = memparse(str + 9, &str);
6662 			} else if (!strncmp(str, "hpmmiosize=", 11)) {
6663 				pci_hotplug_mmio_size = memparse(str + 11, &str);
6664 			} else if (!strncmp(str, "hpmmioprefsize=", 15)) {
6665 				pci_hotplug_mmio_pref_size = memparse(str + 15, &str);
6666 			} else if (!strncmp(str, "hpmemsize=", 10)) {
6667 				pci_hotplug_mmio_size = memparse(str + 10, &str);
6668 				pci_hotplug_mmio_pref_size = pci_hotplug_mmio_size;
6669 			} else if (!strncmp(str, "hpbussize=", 10)) {
6670 				pci_hotplug_bus_size =
6671 					simple_strtoul(str + 10, &str, 0);
6672 				if (pci_hotplug_bus_size > 0xff)
6673 					pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE;
6674 			} else if (!strncmp(str, "pcie_bus_tune_off", 17)) {
6675 				pcie_bus_config = PCIE_BUS_TUNE_OFF;
6676 			} else if (!strncmp(str, "pcie_bus_safe", 13)) {
6677 				pcie_bus_config = PCIE_BUS_SAFE;
6678 			} else if (!strncmp(str, "pcie_bus_perf", 13)) {
6679 				pcie_bus_config = PCIE_BUS_PERFORMANCE;
6680 			} else if (!strncmp(str, "pcie_bus_peer2peer", 18)) {
6681 				pcie_bus_config = PCIE_BUS_PEER2PEER;
6682 			} else if (!strncmp(str, "pcie_scan_all", 13)) {
6683 				pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
6684 			} else if (!strncmp(str, "disable_acs_redir=", 18)) {
6685 				disable_acs_redir_param = str + 18;
6686 			} else if (!strncmp(str, "config_acs=", 11)) {
6687 				config_acs_param = str + 11;
6688 			} else {
6689 				pr_err("PCI: Unknown option `%s'\n", str);
6690 			}
6691 		}
6692 		str = k;
6693 	}
6694 	return 0;
6695 }
6696 early_param("pci", pci_setup);
6697 
6698 /*
6699  * 'resource_alignment_param' and 'disable_acs_redir_param' are initialized
6700  * in pci_setup(), above, to point to data in the __initdata section which
6701  * will be freed after the init sequence is complete. We can't allocate memory
6702  * in pci_setup() because some architectures do not have any memory allocation
6703  * service available during an early_param() call. So we allocate memory and
6704  * copy the variable here before the init section is freed.
6705  *
6706  */
6707 static int __init pci_realloc_setup_params(void)
6708 {
6709 	resource_alignment_param = kstrdup(resource_alignment_param,
6710 					   GFP_KERNEL);
6711 	disable_acs_redir_param = kstrdup(disable_acs_redir_param, GFP_KERNEL);
6712 	config_acs_param = kstrdup(config_acs_param, GFP_KERNEL);
6713 
6714 	return 0;
6715 }
6716 pure_initcall(pci_realloc_setup_params);
6717