1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * PCI Bus Services, see include/linux/pci.h for further explanation.
4  *
5  * Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
6  * David Mosberger-Tang
7  *
8  * Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz>
9  */
10 
11 #include <linux/acpi.h>
12 #include <linux/kernel.h>
13 #include <linux/delay.h>
14 #include <linux/dmi.h>
15 #include <linux/init.h>
16 #include <linux/msi.h>
17 #include <linux/of.h>
18 #include <linux/pci.h>
19 #include <linux/pm.h>
20 #include <linux/slab.h>
21 #include <linux/module.h>
22 #include <linux/spinlock.h>
23 #include <linux/string.h>
24 #include <linux/log2.h>
25 #include <linux/logic_pio.h>
26 #include <linux/device.h>
27 #include <linux/pm_runtime.h>
28 #include <linux/pci_hotplug.h>
29 #include <linux/vmalloc.h>
30 #include <asm/dma.h>
31 #include <linux/aer.h>
32 #include <linux/bitfield.h>
33 #include "pci.h"
34 
35 DEFINE_MUTEX(pci_slot_mutex);
36 
37 const char *pci_power_names[] = {
38 	"error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown",
39 };
40 EXPORT_SYMBOL_GPL(pci_power_names);
41 
42 #ifdef CONFIG_X86_32
43 int isa_dma_bridge_buggy;
44 EXPORT_SYMBOL(isa_dma_bridge_buggy);
45 #endif
46 
47 int pci_pci_problems;
48 EXPORT_SYMBOL(pci_pci_problems);
49 
50 unsigned int pci_pm_d3hot_delay;
51 
52 static void pci_pme_list_scan(struct work_struct *work);
53 
54 static LIST_HEAD(pci_pme_list);
55 static DEFINE_MUTEX(pci_pme_list_mutex);
56 static DECLARE_DELAYED_WORK(pci_pme_work, pci_pme_list_scan);
57 
58 struct pci_pme_device {
59 	struct list_head list;
60 	struct pci_dev *dev;
61 };
62 
63 #define PME_TIMEOUT 1000 /* msec between PME checks */
64 
65 /*
66  * Following exit from Conventional Reset, devices must be ready within 1 sec
67  * (PCIe r6.0 sec 6.6.1).  A D3cold to D0 transition implies a Conventional
68  * Reset (PCIe r6.0 sec 5.8).
69  */
70 #define PCI_RESET_WAIT 1000 /* msec */
71 
72 /*
73  * Devices may extend the 1 sec period through Request Retry Status
74  * completions (PCIe r6.0 sec 2.3.1).  The spec does not provide an upper
75  * limit, but 60 sec ought to be enough for any device to become
76  * responsive.
77  */
78 #define PCIE_RESET_READY_POLL_MS 60000 /* msec */
79 
80 static void pci_dev_d3_sleep(struct pci_dev *dev)
81 {
82 	unsigned int delay_ms = max(dev->d3hot_delay, pci_pm_d3hot_delay);
83 	unsigned int upper;
84 
85 	if (delay_ms) {
86 		/* Use a 20% upper bound, 1ms minimum */
87 		upper = max(DIV_ROUND_CLOSEST(delay_ms, 5), 1U);
88 		usleep_range(delay_ms * USEC_PER_MSEC,
89 			     (delay_ms + upper) * USEC_PER_MSEC);
90 	}
91 }
92 
93 bool pci_reset_supported(struct pci_dev *dev)
94 {
95 	return dev->reset_methods[0] != 0;
96 }
97 
98 #ifdef CONFIG_PCI_DOMAINS
99 int pci_domains_supported = 1;
100 #endif
101 
102 #define DEFAULT_CARDBUS_IO_SIZE		(256)
103 #define DEFAULT_CARDBUS_MEM_SIZE	(64*1024*1024)
104 /* pci=cbmemsize=nnM,cbiosize=nn can override this */
105 unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE;
106 unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;
107 
108 #define DEFAULT_HOTPLUG_IO_SIZE		(256)
109 #define DEFAULT_HOTPLUG_MMIO_SIZE	(2*1024*1024)
110 #define DEFAULT_HOTPLUG_MMIO_PREF_SIZE	(2*1024*1024)
111 /* hpiosize=nn can override this */
112 unsigned long pci_hotplug_io_size  = DEFAULT_HOTPLUG_IO_SIZE;
113 /*
114  * pci=hpmmiosize=nnM overrides non-prefetchable MMIO size,
115  * pci=hpmmioprefsize=nnM overrides prefetchable MMIO size;
116  * pci=hpmemsize=nnM overrides both
117  */
118 unsigned long pci_hotplug_mmio_size = DEFAULT_HOTPLUG_MMIO_SIZE;
119 unsigned long pci_hotplug_mmio_pref_size = DEFAULT_HOTPLUG_MMIO_PREF_SIZE;
120 
121 #define DEFAULT_HOTPLUG_BUS_SIZE	1
122 unsigned long pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE;
123 
124 
125 /* PCIe MPS/MRRS strategy; can be overridden by kernel command-line param */
126 #ifdef CONFIG_PCIE_BUS_TUNE_OFF
127 enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_TUNE_OFF;
128 #elif defined CONFIG_PCIE_BUS_SAFE
129 enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_SAFE;
130 #elif defined CONFIG_PCIE_BUS_PERFORMANCE
131 enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_PERFORMANCE;
132 #elif defined CONFIG_PCIE_BUS_PEER2PEER
133 enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_PEER2PEER;
134 #else
135 enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_DEFAULT;
136 #endif
137 
138 /*
139  * The default CLS is used if the arch didn't set CLS explicitly and
140  * not all PCI devices agree on the same value.  The arch can override
141  * either the default or the actual value as it sees fit.  Don't
142  * forget this is measured in 32-bit words, not bytes.
143  */
144 u8 pci_dfl_cache_line_size __ro_after_init = L1_CACHE_BYTES >> 2;
145 u8 pci_cache_line_size __ro_after_init;
146 
147 /*
148  * If we set up a device for bus mastering, we need to check the latency
149  * timer as certain BIOSes forget to set it properly.
150  */
151 unsigned int pcibios_max_latency = 255;
152 
153 /* If set, the PCIe ARI capability will not be used. */
154 static bool pcie_ari_disabled;
155 
156 /* If set, the PCIe ATS capability will not be used. */
157 static bool pcie_ats_disabled;
158 
159 /* If set, the PCI config space of each device is printed during boot. */
160 bool pci_early_dump;
161 
162 bool pci_ats_disabled(void)
163 {
164 	return pcie_ats_disabled;
165 }
166 EXPORT_SYMBOL_GPL(pci_ats_disabled);
167 
168 /* Disable bridge_d3 for all PCIe ports */
169 static bool pci_bridge_d3_disable;
170 /* Force bridge_d3 for all PCIe ports */
171 static bool pci_bridge_d3_force;
172 
173 static int __init pcie_port_pm_setup(char *str)
174 {
175 	if (!strcmp(str, "off"))
176 		pci_bridge_d3_disable = true;
177 	else if (!strcmp(str, "force"))
178 		pci_bridge_d3_force = true;
179 	return 1;
180 }
181 __setup("pcie_port_pm=", pcie_port_pm_setup);
182 
183 /**
184  * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
185  * @bus: pointer to PCI bus structure to search
186  *
187  * Given a PCI bus, returns the highest PCI bus number present in the set
188  * including the given PCI bus and its list of child PCI buses.
189  */
190 unsigned char pci_bus_max_busnr(struct pci_bus *bus)
191 {
192 	struct pci_bus *tmp;
193 	unsigned char max, n;
194 
195 	max = bus->busn_res.end;
196 	list_for_each_entry(tmp, &bus->children, node) {
197 		n = pci_bus_max_busnr(tmp);
198 		if (n > max)
199 			max = n;
200 	}
201 	return max;
202 }
203 EXPORT_SYMBOL_GPL(pci_bus_max_busnr);
204 
205 /**
206  * pci_status_get_and_clear_errors - return and clear error bits in PCI_STATUS
207  * @pdev: the PCI device
208  *
209  * Returns error bits set in PCI_STATUS and clears them.
210  */
211 int pci_status_get_and_clear_errors(struct pci_dev *pdev)
212 {
213 	u16 status;
214 	int ret;
215 
216 	ret = pci_read_config_word(pdev, PCI_STATUS, &status);
217 	if (ret != PCIBIOS_SUCCESSFUL)
218 		return -EIO;
219 
220 	status &= PCI_STATUS_ERROR_BITS;
221 	if (status)
222 		pci_write_config_word(pdev, PCI_STATUS, status);
223 
224 	return status;
225 }
226 EXPORT_SYMBOL_GPL(pci_status_get_and_clear_errors);
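
/*
 * Usage sketch (illustrative, not part of this file): a driver error
 * handler might log and clear PCI_STATUS errors like this, where "pdev"
 * is assumed to be a bound &struct pci_dev:
 *
 *	int status = pci_status_get_and_clear_errors(pdev);
 *
 *	if (status < 0)
 *		return status;
 *	if (status & PCI_STATUS_PARITY)
 *		pci_warn(pdev, "Master Data Parity Error seen\n");
 */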
227 
228 #ifdef CONFIG_HAS_IOMEM
229 static void __iomem *__pci_ioremap_resource(struct pci_dev *pdev, int bar,
230 					    bool write_combine)
231 {
232 	struct resource *res = &pdev->resource[bar];
233 	resource_size_t start = res->start;
234 	resource_size_t size = resource_size(res);
235 
236 	/*
237 	 * Make sure the BAR is actually a memory resource, not an IO resource
238 	 */
239 	if (res->flags & IORESOURCE_UNSET || !(res->flags & IORESOURCE_MEM)) {
240 		pci_err(pdev, "can't ioremap BAR %d: %pR\n", bar, res);
241 		return NULL;
242 	}
243 
244 	if (write_combine)
245 		return ioremap_wc(start, size);
246 
247 	return ioremap(start, size);
248 }
249 
250 void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
251 {
252 	return __pci_ioremap_resource(pdev, bar, false);
253 }
254 EXPORT_SYMBOL_GPL(pci_ioremap_bar);
255 
256 void __iomem *pci_ioremap_wc_bar(struct pci_dev *pdev, int bar)
257 {
258 	return __pci_ioremap_resource(pdev, bar, true);
259 }
260 EXPORT_SYMBOL_GPL(pci_ioremap_wc_bar);
261 #endif
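
/*
 * Usage sketch (illustrative, not part of this file): a typical probe
 * path maps BAR 0 after pci_enable_device(); the register offset 0x10
 * below is hypothetical:
 *
 *	void __iomem *regs = pci_ioremap_bar(pdev, 0);
 *
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(0x1, regs + 0x10);
 */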
262 
263 /**
264  * pci_dev_str_match_path - test if a path string matches a device
265  * @dev: the PCI device to test
266  * @path: string to match the device against
267  * @endptr: pointer to the string after the match
268  *
269  * Test if a string (typically from a kernel parameter) formatted as a
270  * path of device/function addresses matches a PCI device. The string must
271  * be of the form:
272  *
273  *   [<domain>:]<bus>:<device>.<func>[/<device>.<func>]*
274  *
275  * A path for a device can be obtained using 'lspci -t'.  Using a path
276  * is more robust against bus renumbering than using only a single bus,
277  * device and function address.
278  *
279  * Returns 1 if the string matches the device, 0 if it does not and
280  * a negative error code if it fails to parse the string.
281  */
282 static int pci_dev_str_match_path(struct pci_dev *dev, const char *path,
283 				  const char **endptr)
284 {
285 	int ret;
286 	unsigned int seg, bus, slot, func;
287 	char *wpath, *p;
288 	char end;
289 
290 	*endptr = strchrnul(path, ';');
291 
292 	wpath = kmemdup_nul(path, *endptr - path, GFP_ATOMIC);
293 	if (!wpath)
294 		return -ENOMEM;
295 
296 	while (1) {
297 		p = strrchr(wpath, '/');
298 		if (!p)
299 			break;
300 		ret = sscanf(p, "/%x.%x%c", &slot, &func, &end);
301 		if (ret != 2) {
302 			ret = -EINVAL;
303 			goto free_and_exit;
304 		}
305 
306 		if (dev->devfn != PCI_DEVFN(slot, func)) {
307 			ret = 0;
308 			goto free_and_exit;
309 		}
310 
311 		/*
312 		 * Note: we don't need to get a reference to the upstream
313 		 * bridge because we hold a reference to the top level
314 		 * device which should hold a reference to the bridge,
315 		 * and so on.
316 		 */
317 		dev = pci_upstream_bridge(dev);
318 		if (!dev) {
319 			ret = 0;
320 			goto free_and_exit;
321 		}
322 
323 		*p = 0;
324 	}
325 
326 	ret = sscanf(wpath, "%x:%x:%x.%x%c", &seg, &bus, &slot,
327 		     &func, &end);
328 	if (ret != 4) {
329 		seg = 0;
330 		ret = sscanf(wpath, "%x:%x.%x%c", &bus, &slot, &func, &end);
331 		if (ret != 3) {
332 			ret = -EINVAL;
333 			goto free_and_exit;
334 		}
335 	}
336 
337 	ret = (seg == pci_domain_nr(dev->bus) &&
338 	       bus == dev->bus->number &&
339 	       dev->devfn == PCI_DEVFN(slot, func));
340 
341 free_and_exit:
342 	kfree(wpath);
343 	return ret;
344 }
345 
346 /**
347  * pci_dev_str_match - test if a string matches a device
348  * @dev: the PCI device to test
349  * @p: string to match the device against
350  * @endptr: pointer to the string after the match
351  *
352  * Test if a string (typically from a kernel parameter) matches a specified
353  * PCI device. The string may be of one of the following formats:
354  *
355  *   [<domain>:]<bus>:<device>.<func>[/<device>.<func>]*
356  *   pci:<vendor>:<device>[:<subvendor>:<subdevice>]
357  *
358  * The first format specifies a PCI bus/device/function address which
359  * may change if new hardware is inserted, if motherboard firmware changes,
360  * or due to changes caused in kernel parameters. If the domain is
361  * left unspecified, it is taken to be 0.  In order to be robust against
362  * bus renumbering issues, a path of PCI device/function numbers may be used
363  * to address the specific device.  The path for a device can be determined
364  * through the use of 'lspci -t'.
365  *
366  * The second format matches devices using IDs in the configuration
367  * space which may match multiple devices in the system. A value of 0
368  * for any field will match all devices. (Note: this differs from
369  * in-kernel code that uses PCI_ANY_ID which is ~0; this is for
370  * legacy reasons and convenience so users don't have to specify
371  * FFFFFFFFs on the command line.)
372  *
373  * Returns 1 if the string matches the device, 0 if it does not and
374  * a negative error code if the string cannot be parsed.
375  */
376 static int pci_dev_str_match(struct pci_dev *dev, const char *p,
377 			     const char **endptr)
378 {
379 	int ret;
380 	int count;
381 	unsigned short vendor, device, subsystem_vendor, subsystem_device;
382 
383 	if (strncmp(p, "pci:", 4) == 0) {
384 		/* PCI vendor/device (subvendor/subdevice) IDs are specified */
385 		p += 4;
386 		ret = sscanf(p, "%hx:%hx:%hx:%hx%n", &vendor, &device,
387 			     &subsystem_vendor, &subsystem_device, &count);
388 		if (ret != 4) {
389 			ret = sscanf(p, "%hx:%hx%n", &vendor, &device, &count);
390 			if (ret != 2)
391 				return -EINVAL;
392 
393 			subsystem_vendor = 0;
394 			subsystem_device = 0;
395 		}
396 
397 		p += count;
398 
399 		if ((!vendor || vendor == dev->vendor) &&
400 		    (!device || device == dev->device) &&
401 		    (!subsystem_vendor ||
402 			    subsystem_vendor == dev->subsystem_vendor) &&
403 		    (!subsystem_device ||
404 			    subsystem_device == dev->subsystem_device))
405 			goto found;
406 	} else {
407 		/*
408 		 * PCI Bus, Device, Function IDs are specified
409 		 * (optionally, may include a path of devfns following it)
410 		 */
411 		ret = pci_dev_str_match_path(dev, p, &p);
412 		if (ret < 0)
413 			return ret;
414 		else if (ret)
415 			goto found;
416 	}
417 
418 	*endptr = p;
419 	return 0;
420 
421 found:
422 	*endptr = p;
423 	return 1;
424 }
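
/*
 * Example strings accepted by pci_dev_str_match(), for illustration
 * (the IDs shown are hypothetical):
 *
 *	"0000:00:1f.2"		domain 0000, bus 00, device 1f, function 2
 *	"00:1c.0/00.0"		path form; function 00.0 below bridge 00:1c.0
 *	"pci:8086:9d3e"		vendor 8086, device 9d3e, any subsystem
 *	"pci:8086:0:0:1028"	vendor 8086, any device, any subvendor,
 *				subdevice 1028
 */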
425 
426 static u8 __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
427 				  u8 pos, int cap, int *ttl)
428 {
429 	u8 id;
430 	u16 ent;
431 
432 	pci_bus_read_config_byte(bus, devfn, pos, &pos);
433 
434 	while ((*ttl)--) {
435 		if (pos < 0x40)
436 			break;
437 		pos &= ~3;
438 		pci_bus_read_config_word(bus, devfn, pos, &ent);
439 
440 		id = ent & 0xff;
441 		if (id == 0xff)
442 			break;
443 		if (id == cap)
444 			return pos;
445 		pos = (ent >> 8);
446 	}
447 	return 0;
448 }
449 
450 static u8 __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn,
451 			      u8 pos, int cap)
452 {
453 	int ttl = PCI_FIND_CAP_TTL;
454 
455 	return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl);
456 }
457 
458 u8 pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
459 {
460 	return __pci_find_next_cap(dev->bus, dev->devfn,
461 				   pos + PCI_CAP_LIST_NEXT, cap);
462 }
463 EXPORT_SYMBOL_GPL(pci_find_next_capability);
464 
465 static u8 __pci_bus_find_cap_start(struct pci_bus *bus,
466 				    unsigned int devfn, u8 hdr_type)
467 {
468 	u16 status;
469 
470 	pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
471 	if (!(status & PCI_STATUS_CAP_LIST))
472 		return 0;
473 
474 	switch (hdr_type) {
475 	case PCI_HEADER_TYPE_NORMAL:
476 	case PCI_HEADER_TYPE_BRIDGE:
477 		return PCI_CAPABILITY_LIST;
478 	case PCI_HEADER_TYPE_CARDBUS:
479 		return PCI_CB_CAPABILITY_LIST;
480 	}
481 
482 	return 0;
483 }
484 
485 /**
486  * pci_find_capability - query for devices' capabilities
487  * @dev: PCI device to query
488  * @cap: capability code
489  *
490  * Tell if a device supports a given PCI capability.
491  * Returns the address of the requested capability structure within the
492  * device's PCI configuration space or 0 in case the device does not
493  * support it.  Possible values for @cap include:
494  *
495  *  %PCI_CAP_ID_PM           Power Management
496  *  %PCI_CAP_ID_AGP          Accelerated Graphics Port
497  *  %PCI_CAP_ID_VPD          Vital Product Data
498  *  %PCI_CAP_ID_SLOTID       Slot Identification
499  *  %PCI_CAP_ID_MSI          Message Signalled Interrupts
500  *  %PCI_CAP_ID_CHSWP        CompactPCI HotSwap
501  *  %PCI_CAP_ID_PCIX         PCI-X
502  *  %PCI_CAP_ID_EXP          PCI Express
503  */
504 u8 pci_find_capability(struct pci_dev *dev, int cap)
505 {
506 	u8 pos;
507 
508 	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
509 	if (pos)
510 		pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap);
511 
512 	return pos;
513 }
514 EXPORT_SYMBOL(pci_find_capability);
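
/*
 * Usage sketch (illustrative, not part of this file): locating the Power
 * Management capability and reading its control/status register, with
 * "pdev" an assumed &struct pci_dev:
 *
 *	u16 pmcsr;
 *	u8 pm = pci_find_capability(pdev, PCI_CAP_ID_PM);
 *
 *	if (pm)
 *		pci_read_config_word(pdev, pm + PCI_PM_CTRL, &pmcsr);
 */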
515 
516 /**
517  * pci_bus_find_capability - query for devices' capabilities
518  * @bus: the PCI bus to query
519  * @devfn: PCI device to query
520  * @cap: capability code
521  *
522  * Like pci_find_capability() but works for PCI devices that do not have a
523  * pci_dev structure set up yet.
524  *
525  * Returns the address of the requested capability structure within the
526  * device's PCI configuration space or 0 in case the device does not
527  * support it.
528  */
529 u8 pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
530 {
531 	u8 hdr_type, pos;
532 
533 	pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);
534 
535 	pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & PCI_HEADER_TYPE_MASK);
536 	if (pos)
537 		pos = __pci_find_next_cap(bus, devfn, pos, cap);
538 
539 	return pos;
540 }
541 EXPORT_SYMBOL(pci_bus_find_capability);
542 
543 /**
544  * pci_find_next_ext_capability - Find an extended capability
545  * @dev: PCI device to query
546  * @start: address at which to start looking (0 to start at beginning of list)
547  * @cap: capability code
548  *
549  * Returns the address of the next matching extended capability structure
550  * within the device's PCI configuration space or 0 if the device does
551  * not support it.  Some capabilities can occur several times, e.g., the
552  * vendor-specific capability, and this provides a way to find them all.
553  */
554 u16 pci_find_next_ext_capability(struct pci_dev *dev, u16 start, int cap)
555 {
556 	u32 header;
557 	int ttl;
558 	u16 pos = PCI_CFG_SPACE_SIZE;
559 
560 	/* minimum 8 bytes per capability */
561 	ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;
562 
563 	if (dev->cfg_size <= PCI_CFG_SPACE_SIZE)
564 		return 0;
565 
566 	if (start)
567 		pos = start;
568 
569 	if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
570 		return 0;
571 
572 	/*
573 	 * If we have no capabilities, this is indicated by cap ID,
574 	 * cap version and next pointer all being 0.
575 	 */
576 	if (header == 0)
577 		return 0;
578 
579 	while (ttl-- > 0) {
580 		if (PCI_EXT_CAP_ID(header) == cap && pos != start)
581 			return pos;
582 
583 		pos = PCI_EXT_CAP_NEXT(header);
584 		if (pos < PCI_CFG_SPACE_SIZE)
585 			break;
586 
587 		if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
588 			break;
589 	}
590 
591 	return 0;
592 }
593 EXPORT_SYMBOL_GPL(pci_find_next_ext_capability);
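
/*
 * Usage sketch (illustrative, not part of this file): iterating over all
 * instances of a capability that may occur several times, e.g. the
 * vendor-specific extended capability:
 *
 *	u16 pos = 0;
 *
 *	while ((pos = pci_find_next_ext_capability(pdev, pos,
 *						   PCI_EXT_CAP_ID_VNDR)))
 *		pci_info(pdev, "VSEC at offset %#x\n", pos);
 */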
594 
595 /**
596  * pci_find_ext_capability - Find an extended capability
597  * @dev: PCI device to query
598  * @cap: capability code
599  *
600  * Returns the address of the requested extended capability structure
601  * within the device's PCI configuration space or 0 if the device does
602  * not support it.  Possible values for @cap include:
603  *
604  *  %PCI_EXT_CAP_ID_ERR		Advanced Error Reporting
605  *  %PCI_EXT_CAP_ID_VC		Virtual Channel
606  *  %PCI_EXT_CAP_ID_DSN		Device Serial Number
607  *  %PCI_EXT_CAP_ID_PWR		Power Budgeting
608  */
609 u16 pci_find_ext_capability(struct pci_dev *dev, int cap)
610 {
611 	return pci_find_next_ext_capability(dev, 0, cap);
612 }
613 EXPORT_SYMBOL_GPL(pci_find_ext_capability);
614 
615 /**
616  * pci_get_dsn - Read and return the 8-byte Device Serial Number
617  * @dev: PCI device to query
618  *
619  * Looks up the PCI_EXT_CAP_ID_DSN and reads the 8 bytes of the Device Serial
620  * Number.
621  *
622  * Returns the DSN, or zero if the capability does not exist.
623  */
624 u64 pci_get_dsn(struct pci_dev *dev)
625 {
626 	u32 dword;
627 	u64 dsn;
628 	int pos;
629 
630 	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DSN);
631 	if (!pos)
632 		return 0;
633 
634 	/*
635 	 * The Device Serial Number is two dwords offset 4 bytes from the
636 	 * capability position. The specification says that the first dword is
637 	 * the lower half, and the second dword is the upper half.
638 	 */
639 	pos += 4;
640 	pci_read_config_dword(dev, pos, &dword);
641 	dsn = (u64)dword;
642 	pci_read_config_dword(dev, pos + 4, &dword);
643 	dsn |= ((u64)dword) << 32;
644 
645 	return dsn;
646 }
647 EXPORT_SYMBOL_GPL(pci_get_dsn);
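
/*
 * Usage sketch (illustrative, not part of this file): the DSN can serve
 * as a stable device identifier when present; "pdev" and "buf" are
 * assumed:
 *
 *	u64 dsn = pci_get_dsn(pdev);
 *
 *	if (dsn)
 *		snprintf(buf, sizeof(buf), "%016llx", dsn);
 */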
648 
649 static u8 __pci_find_next_ht_cap(struct pci_dev *dev, u8 pos, int ht_cap)
650 {
651 	int rc, ttl = PCI_FIND_CAP_TTL;
652 	u8 cap, mask;
653 
654 	if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST)
655 		mask = HT_3BIT_CAP_MASK;
656 	else
657 		mask = HT_5BIT_CAP_MASK;
658 
659 	pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos,
660 				      PCI_CAP_ID_HT, &ttl);
661 	while (pos) {
662 		rc = pci_read_config_byte(dev, pos + 3, &cap);
663 		if (rc != PCIBIOS_SUCCESSFUL)
664 			return 0;
665 
666 		if ((cap & mask) == ht_cap)
667 			return pos;
668 
669 		pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn,
670 					      pos + PCI_CAP_LIST_NEXT,
671 					      PCI_CAP_ID_HT, &ttl);
672 	}
673 
674 	return 0;
675 }
676 
677 /**
678  * pci_find_next_ht_capability - query a device's HyperTransport capabilities
679  * @dev: PCI device to query
680  * @pos: Position from which to continue searching
681  * @ht_cap: HyperTransport capability code
682  *
683  * To be used in conjunction with pci_find_ht_capability() to search for
684  * all capabilities matching @ht_cap. @pos should always be a value returned
685  * from pci_find_ht_capability().
686  *
687  * NB. To be 100% safe against broken PCI devices, the caller should take
688  * steps to avoid an infinite loop.
689  */
690 u8 pci_find_next_ht_capability(struct pci_dev *dev, u8 pos, int ht_cap)
691 {
692 	return __pci_find_next_ht_cap(dev, pos + PCI_CAP_LIST_NEXT, ht_cap);
693 }
694 EXPORT_SYMBOL_GPL(pci_find_next_ht_capability);
695 
696 /**
697  * pci_find_ht_capability - query a device's HyperTransport capabilities
698  * @dev: PCI device to query
699  * @ht_cap: HyperTransport capability code
700  *
701  * Tell if a device supports a given HyperTransport capability.
702  * Returns an address within the device's PCI configuration space
703  * or 0 in case the device does not support the requested capability.
704  * The address points to the PCI capability, of type PCI_CAP_ID_HT,
705  * which has a HyperTransport capability matching @ht_cap.
706  */
707 u8 pci_find_ht_capability(struct pci_dev *dev, int ht_cap)
708 {
709 	u8 pos;
710 
711 	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
712 	if (pos)
713 		pos = __pci_find_next_ht_cap(dev, pos, ht_cap);
714 
715 	return pos;
716 }
717 EXPORT_SYMBOL_GPL(pci_find_ht_capability);
718 
719 /**
720  * pci_find_vsec_capability - Find a vendor-specific extended capability
721  * @dev: PCI device to query
722  * @vendor: Vendor ID for which capability is defined
723  * @cap: Vendor-specific capability ID
724  *
725  * If @dev has Vendor ID @vendor, search for a VSEC capability with
726  * VSEC ID @cap. If found, return the capability offset in
727  * config space; otherwise return 0.
728  */
729 u16 pci_find_vsec_capability(struct pci_dev *dev, u16 vendor, int cap)
730 {
731 	u16 vsec = 0;
732 	u32 header;
733 	int ret;
734 
735 	if (vendor != dev->vendor)
736 		return 0;
737 
738 	while ((vsec = pci_find_next_ext_capability(dev, vsec,
739 						     PCI_EXT_CAP_ID_VNDR))) {
740 		ret = pci_read_config_dword(dev, vsec + PCI_VNDR_HEADER, &header);
741 		if (ret != PCIBIOS_SUCCESSFUL)
742 			continue;
743 
744 		if (PCI_VNDR_HEADER_ID(header) == cap)
745 			return vsec;
746 	}
747 
748 	return 0;
749 }
750 EXPORT_SYMBOL_GPL(pci_find_vsec_capability);
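
/*
 * Usage sketch (illustrative, not part of this file): a vendor driver
 * looking up its own VSEC structure; the VSEC ID 0x10 is hypothetical:
 *
 *	u32 hdr;
 *	u16 vsec = pci_find_vsec_capability(pdev, PCI_VENDOR_ID_INTEL, 0x10);
 *
 *	if (vsec)
 *		pci_read_config_dword(pdev, vsec + PCI_VNDR_HEADER, &hdr);
 */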
751 
752 /**
753  * pci_find_dvsec_capability - Find DVSEC for vendor
754  * @dev: PCI device to query
755  * @vendor: Vendor ID to match for the DVSEC
756  * @dvsec: Designated Vendor-specific capability ID
757  *
758  * If the DVSEC has Vendor ID @vendor and DVSEC ID @dvsec, return the capability
759  * offset in config space; otherwise return 0.
760  */
761 u16 pci_find_dvsec_capability(struct pci_dev *dev, u16 vendor, u16 dvsec)
762 {
763 	int pos;
764 
765 	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DVSEC);
766 	if (!pos)
767 		return 0;
768 
769 	while (pos) {
770 		u16 v, id;
771 
772 		pci_read_config_word(dev, pos + PCI_DVSEC_HEADER1, &v);
773 		pci_read_config_word(dev, pos + PCI_DVSEC_HEADER2, &id);
774 		if (vendor == v && dvsec == id)
775 			return pos;
776 
777 		pos = pci_find_next_ext_capability(dev, pos, PCI_EXT_CAP_ID_DVSEC);
778 	}
779 
780 	return 0;
781 }
782 EXPORT_SYMBOL_GPL(pci_find_dvsec_capability);
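
/*
 * Usage sketch (illustrative, not part of this file): probing for a
 * Designated VSEC; the vendor ID 0x1234 and DVSEC ID 0x5 are
 * hypothetical:
 *
 *	u16 pos = pci_find_dvsec_capability(pdev, 0x1234, 0x5);
 *
 *	if (!pos)
 *		return -ENODEV;
 */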
783 
784 /**
785  * pci_find_parent_resource - return resource region of parent bus of given
786  *			      region
787  * @dev: PCI device structure contains resources to be searched
788  * @res: child resource record for which parent is sought
789  *
790  * For given resource region of given device, return the resource region of
791  * parent bus the given region is contained in.
792  */
793 struct resource *pci_find_parent_resource(const struct pci_dev *dev,
794 					  struct resource *res)
795 {
796 	const struct pci_bus *bus = dev->bus;
797 	struct resource *r;
798 
799 	pci_bus_for_each_resource(bus, r) {
800 		if (!r)
801 			continue;
802 		if (resource_contains(r, res)) {
803 
804 			/*
805 			 * If the window is prefetchable but the BAR is
806 			 * not, the allocator made a mistake.
807 			 */
808 			if (r->flags & IORESOURCE_PREFETCH &&
809 			    !(res->flags & IORESOURCE_PREFETCH))
810 				return NULL;
811 
812 			/*
813 			 * If we're below a transparent bridge, there may
814 			 * be both a positively-decoded aperture and a
815 			 * subtractively-decoded region that contain the BAR.
816 			 * We want the positively-decoded one, so this depends
817 			 * on pci_bus_for_each_resource() giving us those
818 			 * first.
819 			 */
820 			return r;
821 		}
822 	}
823 	return NULL;
824 }
825 EXPORT_SYMBOL(pci_find_parent_resource);
826 
827 /**
828  * pci_find_resource - Return matching PCI device resource
829  * @dev: PCI device to query
830  * @res: Resource to look for
831  *
832  * Goes over standard PCI resources (BARs) and checks if the given resource
833  * is partially or fully contained in any of them. In that case the
834  * matching resource is returned, %NULL otherwise.
835  */
836 struct resource *pci_find_resource(struct pci_dev *dev, struct resource *res)
837 {
838 	int i;
839 
840 	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
841 		struct resource *r = &dev->resource[i];
842 
843 		if (r->start && resource_contains(r, res))
844 			return r;
845 	}
846 
847 	return NULL;
848 }
849 EXPORT_SYMBOL(pci_find_resource);
850 
851 /**
852  * pci_resource_name - Return the name of the PCI resource
853  * @dev: PCI device to query
854  * @i: index of the resource
855  *
856  * Return the standard PCI resource (BAR) name according to their index.
857  */
858 const char *pci_resource_name(struct pci_dev *dev, unsigned int i)
859 {
860 	static const char * const bar_name[] = {
861 		"BAR 0",
862 		"BAR 1",
863 		"BAR 2",
864 		"BAR 3",
865 		"BAR 4",
866 		"BAR 5",
867 		"ROM",
868 #ifdef CONFIG_PCI_IOV
869 		"VF BAR 0",
870 		"VF BAR 1",
871 		"VF BAR 2",
872 		"VF BAR 3",
873 		"VF BAR 4",
874 		"VF BAR 5",
875 #endif
876 		"bridge window",	/* "io" included in %pR */
877 		"bridge window",	/* "mem" included in %pR */
878 		"bridge window",	/* "mem pref" included in %pR */
879 	};
880 	static const char * const cardbus_name[] = {
881 		"BAR 1",
882 		"unknown",
883 		"unknown",
884 		"unknown",
885 		"unknown",
886 		"unknown",
887 #ifdef CONFIG_PCI_IOV
888 		"unknown",
889 		"unknown",
890 		"unknown",
891 		"unknown",
892 		"unknown",
893 		"unknown",
894 #endif
895 		"CardBus bridge window 0",	/* I/O */
896 		"CardBus bridge window 1",	/* I/O */
897 		"CardBus bridge window 0",	/* mem */
898 		"CardBus bridge window 1",	/* mem */
899 	};
900 
901 	if (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS &&
902 	    i < ARRAY_SIZE(cardbus_name))
903 		return cardbus_name[i];
904 
905 	if (i < ARRAY_SIZE(bar_name))
906 		return bar_name[i];
907 
908 	return "unknown";
909 }
910 
911 /**
912  * pci_wait_for_pending - wait for @mask bit(s) to clear in status word @pos
913  * @dev: the PCI device to operate on
914  * @pos: config space offset of status word
915  * @mask: mask of bit(s) to care about in status word
916  *
917  * Return 1 when mask bit(s) in status word clear, 0 otherwise.
918  */
919 int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask)
920 {
921 	int i;
922 
923 	/* Wait for Transaction Pending bit to clear */
924 	for (i = 0; i < 4; i++) {
925 		u16 status;
926 		if (i)
927 			msleep((1 << (i - 1)) * 100);
928 
929 		pci_read_config_word(dev, pos, &status);
930 		if (!(status & mask))
931 			return 1;
932 	}
933 
934 	return 0;
935 }
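
/*
 * Usage sketch (illustrative, not part of this file): waiting for the
 * PCIe Transaction Pending bit to clear before a reset, roughly as
 * pci_wait_for_pending_transaction() does elsewhere in the tree:
 *
 *	if (!pci_wait_for_pending(dev, pci_pcie_cap(dev) + PCI_EXP_DEVSTA,
 *				  PCI_EXP_DEVSTA_TRPND))
 *		pci_err(dev, "transactions still pending\n");
 */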
936 
937 static int pci_acs_enable;
938 
939 /**
940  * pci_request_acs - ask for ACS to be enabled if supported
941  */
942 void pci_request_acs(void)
943 {
944 	pci_acs_enable = 1;
945 }
946 
947 static const char *disable_acs_redir_param;
948 static const char *config_acs_param;
949 
950 struct pci_acs {
951 	u16 cap;
952 	u16 ctrl;
953 	u16 fw_ctrl;
954 };
955 
956 static void __pci_config_acs(struct pci_dev *dev, struct pci_acs *caps,
957 			     const char *p, const u16 acs_mask, const u16 acs_flags)
958 {
959 	u16 flags = acs_flags;
960 	u16 mask = acs_mask;
961 	char *delimit;
962 	int ret = 0;
963 
964 	if (!p)
965 		return;
966 
967 	while (*p) {
968 		if (!acs_mask) {
969 			/* Check for ACS flags */
970 			delimit = strstr(p, "@");
971 			if (delimit) {
972 				int end;
973 				u32 shift = 0;
974 
975 				end = delimit - p - 1;
976 				mask = 0;
977 				flags = 0;
978 
979 				while (end > -1) {
980 					if (*(p + end) == '0') {
981 						mask |= 1 << shift;
982 						shift++;
983 						end--;
984 					} else if (*(p + end) == '1') {
985 						mask |= 1 << shift;
986 						flags |= 1 << shift;
987 						shift++;
988 						end--;
989 					} else if ((*(p + end) == 'x') || (*(p + end) == 'X')) {
990 						shift++;
991 						end--;
992 					} else {
993 						pci_err(dev, "Invalid ACS flags... Ignoring\n");
994 						return;
995 					}
996 				}
997 				p = delimit + 1;
998 			} else {
999 				pci_err(dev, "ACS Flags missing\n");
1000 				return;
1001 			}
1002 		}
1003 
1004 		if (mask & ~(PCI_ACS_SV | PCI_ACS_TB | PCI_ACS_RR | PCI_ACS_CR |
1005 			    PCI_ACS_UF | PCI_ACS_EC | PCI_ACS_DT)) {
1006 			pci_err(dev, "Invalid ACS flags specified\n");
1007 			return;
1008 		}
1009 
1010 		ret = pci_dev_str_match(dev, p, &p);
1011 		if (ret < 0) {
1012 			pr_info_once("PCI: Can't parse ACS command line parameter\n");
1013 			break;
1014 		} else if (ret == 1) {
1015 			/* Found a match */
1016 			break;
1017 		}
1018 
1019 		if (*p != ';' && *p != ',') {
1020 			/* End of param or invalid format */
1021 			break;
1022 		}
1023 		p++;
1024 	}
1025 
1026 	if (ret != 1)
1027 		return;
1028 
1029 	if (!pci_dev_specific_disable_acs_redir(dev))
1030 		return;
1031 
1032 	pci_dbg(dev, "ACS mask  = %#06x\n", mask);
1033 	pci_dbg(dev, "ACS flags = %#06x\n", flags);
1034 	pci_dbg(dev, "ACS control = %#06x\n", caps->ctrl);
1035 	pci_dbg(dev, "ACS fw_ctrl = %#06x\n", caps->fw_ctrl);
1036 
1037 	/*
1038 	 * For mask bits that are 0, copy them from the firmware setting
1039 	 * and apply flags for all the mask bits that are 1.
1040 	 */
1041 	caps->ctrl = (caps->fw_ctrl & ~mask) | (flags & mask);
1042 
1043 	pci_info(dev, "Configured ACS to %#06x\n", caps->ctrl);
1044 }
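
/*
 * Example (illustrative; the device addresses are hypothetical): the
 * parameters parsed above look like the following on the kernel command
 * line.  Each character of the flags string sets ('1'), clears ('0') or
 * preserves ('x') one ACS control bit, the rightmost character mapping
 * to bit 0; multiple device strings are separated by ';':
 *
 *	pci=config_acs=10x1x@0000:00:1c.0
 *	pci=disable_acs_redir=0000:00:1c.0;pci:8086:9d3e
 */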
1045 
1046 /**
1047  * pci_std_enable_acs - enable ACS on devices using standard ACS capabilities
1048  * @dev: the PCI device
1049  * @caps: default ACS controls
1050  */
1051 static void pci_std_enable_acs(struct pci_dev *dev, struct pci_acs *caps)
1052 {
1053 	/* Source Validation */
1054 	caps->ctrl |= (caps->cap & PCI_ACS_SV);
1055 
1056 	/* P2P Request Redirect */
1057 	caps->ctrl |= (caps->cap & PCI_ACS_RR);
1058 
1059 	/* P2P Completion Redirect */
1060 	caps->ctrl |= (caps->cap & PCI_ACS_CR);
1061 
1062 	/* Upstream Forwarding */
1063 	caps->ctrl |= (caps->cap & PCI_ACS_UF);
1064 
1065 	/* Enable Translation Blocking for external devices and noats */
1066 	if (pci_ats_disabled() || dev->external_facing || dev->untrusted)
1067 		caps->ctrl |= (caps->cap & PCI_ACS_TB);
1068 }
1069 
1070 /**
1071  * pci_enable_acs - enable ACS if the hardware supports it
1072  * @dev: the PCI device
1073  */
1074 static void pci_enable_acs(struct pci_dev *dev)
1075 {
1076 	struct pci_acs caps;
1077 	bool enable_acs = false;
1078 	int pos;
1079 
1080 	/* If an IOMMU is present, we start with the kernel default caps */
1081 	if (pci_acs_enable) {
1082 		if (pci_dev_specific_enable_acs(dev))
1083 			enable_acs = true;
1084 	}
1085 
1086 	pos = dev->acs_cap;
1087 	if (!pos)
1088 		return;
1089 
1090 	pci_read_config_word(dev, pos + PCI_ACS_CAP, &caps.cap);
1091 	pci_read_config_word(dev, pos + PCI_ACS_CTRL, &caps.ctrl);
1092 	caps.fw_ctrl = caps.ctrl;
1093 
1094 	if (enable_acs)
1095 		pci_std_enable_acs(dev, &caps);
1096 
1097 	/*
1098 	 * Always apply caps from the command line, even if there is no iommu.
1099 	 * Trust that the admin has a reason to change the ACS settings.
1100 	 */
1101 	__pci_config_acs(dev, &caps, disable_acs_redir_param,
1102 			 PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC,
1103 			 ~(PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC));
1104 	__pci_config_acs(dev, &caps, config_acs_param, 0, 0);
1105 
1106 	pci_write_config_word(dev, pos + PCI_ACS_CTRL, caps.ctrl);
1107 }
1108 
1109 /**
1110  * pci_restore_bars - restore a device's BAR values (e.g. after wake-up)
1111  * @dev: PCI device to have its BARs restored
1112  *
1113  * Restore the BAR values for a given device, so as to make it
1114  * accessible by its driver.
1115  */
1116 static void pci_restore_bars(struct pci_dev *dev)
1117 {
1118 	int i;
1119 
1120 	for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
1121 		pci_update_resource(dev, i);
1122 }
1123 
1124 static inline bool platform_pci_power_manageable(struct pci_dev *dev)
1125 {
1126 	if (pci_use_mid_pm())
1127 		return true;
1128 
1129 	return acpi_pci_power_manageable(dev);
1130 }
1131 
1132 static inline int platform_pci_set_power_state(struct pci_dev *dev,
1133 					       pci_power_t t)
1134 {
1135 	if (pci_use_mid_pm())
1136 		return mid_pci_set_power_state(dev, t);
1137 
1138 	return acpi_pci_set_power_state(dev, t);
1139 }
1140 
1141 static inline pci_power_t platform_pci_get_power_state(struct pci_dev *dev)
1142 {
1143 	if (pci_use_mid_pm())
1144 		return mid_pci_get_power_state(dev);
1145 
1146 	return acpi_pci_get_power_state(dev);
1147 }
1148 
1149 static inline void platform_pci_refresh_power_state(struct pci_dev *dev)
1150 {
1151 	if (!pci_use_mid_pm())
1152 		acpi_pci_refresh_power_state(dev);
1153 }
1154 
1155 static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev)
1156 {
1157 	if (pci_use_mid_pm())
1158 		return PCI_POWER_ERROR;
1159 
1160 	return acpi_pci_choose_state(dev);
1161 }
1162 
1163 static inline int platform_pci_set_wakeup(struct pci_dev *dev, bool enable)
1164 {
1165 	if (pci_use_mid_pm())
1166 		return PCI_POWER_ERROR;
1167 
1168 	return acpi_pci_wakeup(dev, enable);
1169 }
1170 
1171 static inline bool platform_pci_need_resume(struct pci_dev *dev)
1172 {
1173 	if (pci_use_mid_pm())
1174 		return false;
1175 
1176 	return acpi_pci_need_resume(dev);
1177 }
1178 
1179 static inline bool platform_pci_bridge_d3(struct pci_dev *dev)
1180 {
1181 	if (pci_use_mid_pm())
1182 		return false;
1183 
1184 	return acpi_pci_bridge_d3(dev);
1185 }
1186 
1187 /**
1188  * pci_update_current_state - Read power state of given device and cache it
1189  * @dev: PCI device to handle.
1190  * @state: State to cache in case the device doesn't have the PM capability
1191  *
1192  * The power state is read from the PMCSR register, which however is
1193  * inaccessible in D3cold.  The platform firmware is therefore queried first
1194  * to detect accessibility of the register.  In case the platform firmware
1195  * reports an incorrect state or the device isn't power manageable by the
1196  * platform at all, we try to detect D3cold by testing accessibility of the
1197  * vendor ID in config space.
1198  */
1199 void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
1200 {
1201 	if (platform_pci_get_power_state(dev) == PCI_D3cold) {
1202 		dev->current_state = PCI_D3cold;
1203 	} else if (dev->pm_cap) {
1204 		u16 pmcsr;
1205 
1206 		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
1207 		if (PCI_POSSIBLE_ERROR(pmcsr)) {
1208 			dev->current_state = PCI_D3cold;
1209 			return;
1210 		}
1211 		dev->current_state = pmcsr & PCI_PM_CTRL_STATE_MASK;
1212 	} else {
1213 		dev->current_state = state;
1214 	}
1215 }
1216 
1217 /**
1218  * pci_refresh_power_state - Refresh the given device's power state data
1219  * @dev: Target PCI device.
1220  *
1221  * Ask the platform to refresh the device's power state information and invoke
1222  * pci_update_current_state() to update its current PCI power state.
1223  */
1224 void pci_refresh_power_state(struct pci_dev *dev)
1225 {
1226 	platform_pci_refresh_power_state(dev);
1227 	pci_update_current_state(dev, dev->current_state);
1228 }
1229 
1230 /**
1231  * pci_platform_power_transition - Use platform to change device power state
1232  * @dev: PCI device to handle.
1233  * @state: State to put the device into.
1234  */
1235 int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
1236 {
1237 	int error;
1238 
1239 	error = platform_pci_set_power_state(dev, state);
1240 	if (!error)
1241 		pci_update_current_state(dev, state);
1242 	else if (!dev->pm_cap) /* Fall back to PCI_D0 */
1243 		dev->current_state = PCI_D0;
1244 
1245 	return error;
1246 }
1247 EXPORT_SYMBOL_GPL(pci_platform_power_transition);
1248 
1249 static int pci_resume_one(struct pci_dev *pci_dev, void *ign)
1250 {
1251 	pm_request_resume(&pci_dev->dev);
1252 	return 0;
1253 }
1254 
1255 /**
1256  * pci_resume_bus - Walk given bus and runtime resume devices on it
1257  * @bus: Top bus of the subtree to walk.
1258  */
1259 void pci_resume_bus(struct pci_bus *bus)
1260 {
1261 	if (bus)
1262 		pci_walk_bus(bus, pci_resume_one, NULL);
1263 }
1264 
1265 static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout)
1266 {
1267 	int delay = 1;
1268 	bool retrain = false;
1269 	struct pci_dev *root, *bridge;
1270 
1271 	root = pcie_find_root_port(dev);
1272 
1273 	if (pci_is_pcie(dev)) {
1274 		bridge = pci_upstream_bridge(dev);
1275 		if (bridge)
1276 			retrain = true;
1277 	}
1278 
1279 	/*
1280 	 * The caller has already waited long enough after a reset that the
1281 	 * device should respond to config requests, but it may respond
1282 	 * with Request Retry Status (RRS) if it needs more time to
1283 	 * initialize.
1284 	 *
1285 	 * If the device is below a Root Port with Configuration RRS
1286 	 * Software Visibility enabled, reading the Vendor ID returns a
1287 	 * special data value if the device responded with RRS.  Read the
1288 	 * Vendor ID until we get non-RRS status.
1289 	 *
1290 	 * If there's no Root Port or Configuration RRS Software Visibility
1291 	 * is not enabled, the device may still respond with RRS, but
1292 	 * hardware may retry the config request.  If no retries receive
1293 	 * Successful Completion, hardware generally synthesizes ~0
1294 	 * (PCI_ERROR_RESPONSE) data to complete the read.  Reading Vendor
1295 	 * ID for VFs and non-existent devices also returns ~0, so read the
1296 	 * Command register until it returns something other than ~0.
1297 	 */
1298 	for (;;) {
1299 		u32 id;
1300 
1301 		if (pci_dev_is_disconnected(dev)) {
1302 			pci_dbg(dev, "disconnected; not waiting\n");
1303 			return -ENOTTY;
1304 		}
1305 
1306 		if (root && root->config_rrs_sv) {
1307 			pci_read_config_dword(dev, PCI_VENDOR_ID, &id);
1308 			if (!pci_bus_rrs_vendor_id(id))
1309 				break;
1310 		} else {
1311 			pci_read_config_dword(dev, PCI_COMMAND, &id);
1312 			if (!PCI_POSSIBLE_ERROR(id))
1313 				break;
1314 		}
1315 
1316 		if (delay > timeout) {
1317 			pci_warn(dev, "not ready %dms after %s; giving up\n",
1318 				 delay - 1, reset_type);
1319 			return -ENOTTY;
1320 		}
1321 
1322 		if (delay > PCI_RESET_WAIT) {
1323 			if (retrain) {
1324 				retrain = false;
1325 				if (pcie_failed_link_retrain(bridge) == 0) {
1326 					delay = 1;
1327 					continue;
1328 				}
1329 			}
1330 			pci_info(dev, "not ready %dms after %s; waiting\n",
1331 				 delay - 1, reset_type);
1332 		}
1333 
1334 		msleep(delay);
1335 		delay *= 2;
1336 	}
1337 
1338 	if (delay > PCI_RESET_WAIT)
1339 		pci_info(dev, "ready %dms after %s\n", delay - 1,
1340 			 reset_type);
1341 	else
1342 		pci_dbg(dev, "ready %dms after %s\n", delay - 1,
1343 			reset_type);
1344 
1345 	return 0;
1346 }
1347 
1348 /**
1349  * pci_power_up - Put the given device into D0
1350  * @dev: PCI device to power up
1351  *
1352  * On success, return 0 or 1, depending on whether or not it is necessary to
1353  * restore the device's BARs subsequently (1 is returned in that case).
1354  *
1355  * On failure, return a negative error code.  Always return failure if @dev
1356  * lacks a Power Management Capability, even if the platform was able to
1357  * put the device in D0 via non-PCI means.
1358  */
1359 int pci_power_up(struct pci_dev *dev)
1360 {
1361 	bool need_restore;
1362 	pci_power_t state;
1363 	u16 pmcsr;
1364 
1365 	platform_pci_set_power_state(dev, PCI_D0);
1366 
1367 	if (!dev->pm_cap) {
1368 		state = platform_pci_get_power_state(dev);
1369 		if (state == PCI_UNKNOWN)
1370 			dev->current_state = PCI_D0;
1371 		else
1372 			dev->current_state = state;
1373 
1374 		return -EIO;
1375 	}
1376 
1377 	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
1378 	if (PCI_POSSIBLE_ERROR(pmcsr)) {
1379 		pci_err(dev, "Unable to change power state from %s to D0, device inaccessible\n",
1380 			pci_power_name(dev->current_state));
1381 		dev->current_state = PCI_D3cold;
1382 		return -EIO;
1383 	}
1384 
1385 	state = pmcsr & PCI_PM_CTRL_STATE_MASK;
1386 
1387 	need_restore = (state == PCI_D3hot || dev->current_state >= PCI_D3hot) &&
1388 			!(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET);
1389 
1390 	if (state == PCI_D0)
1391 		goto end;
1392 
1393 	/*
1394 	 * Force the entire word to 0. This doesn't affect PME_Status, disables
1395 	 * PME_En, and sets PowerState to 0.
1396 	 */
1397 	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, 0);
1398 
1399 	/* Mandatory transition delays; see PCI PM 1.2. */
1400 	if (state == PCI_D3hot)
1401 		pci_dev_d3_sleep(dev);
1402 	else if (state == PCI_D2)
1403 		udelay(PCI_PM_D2_DELAY);
1404 
1405 end:
1406 	dev->current_state = PCI_D0;
1407 	if (need_restore)
1408 		return 1;
1409 
1410 	return 0;
1411 }
1412 
1413 /**
1414  * pci_set_full_power_state - Put a PCI device into D0 and update its state
1415  * @dev: PCI device to power up
1416  * @locked: whether pci_bus_sem is held
1417  *
1418  * Call pci_power_up() to put @dev into D0, read from its PCI_PM_CTRL register
1419  * to confirm the state change, restore its BARs if they might be lost and
1420  * reconfigure ASPM in accordance with the new power state.
1421  *
1422  * If pci_restore_state() is going to be called right after a power state change
1423  * to D0, it is more efficient to use pci_power_up() directly instead of this
1424  * function.
1425  */
1426 static int pci_set_full_power_state(struct pci_dev *dev, bool locked)
1427 {
1428 	u16 pmcsr;
1429 	int ret;
1430 
1431 	ret = pci_power_up(dev);
1432 	if (ret < 0) {
1433 		if (dev->current_state == PCI_D0)
1434 			return 0;
1435 
1436 		return ret;
1437 	}
1438 
1439 	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
1440 	dev->current_state = pmcsr & PCI_PM_CTRL_STATE_MASK;
1441 	if (dev->current_state != PCI_D0) {
1442 		pci_info_ratelimited(dev, "Refused to change power state from %s to D0\n",
1443 				     pci_power_name(dev->current_state));
1444 	} else if (ret > 0) {
1445 		/*
1446 		 * According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
1447 		 * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
1448 		 * from D3hot to D0 _may_ perform an internal reset, thereby
1449 		 * going to "D0 Uninitialized" rather than "D0 Initialized".
1450 		 * For example, at least some versions of the 3c905B and the
1451 		 * 3c556B exhibit this behaviour.
1452 		 *
1453 		 * At least some laptop BIOSen (e.g. the Thinkpad T21) leave
1454 		 * devices in a D3hot state at boot.  Consequently, we need to
1455 		 * restore at least the BARs so that the device will be
1456 		 * accessible to its driver.
1457 		 */
1458 		pci_restore_bars(dev);
1459 	}
1460 
1461 	if (dev->bus->self)
1462 		pcie_aspm_pm_state_change(dev->bus->self, locked);
1463 
1464 	return 0;
1465 }
1466 
1467 /**
1468  * __pci_dev_set_current_state - Set current state of a PCI device
1469  * @dev: Device to handle
1470  * @data: pointer to state to be set
1471  */
1472 static int __pci_dev_set_current_state(struct pci_dev *dev, void *data)
1473 {
1474 	pci_power_t state = *(pci_power_t *)data;
1475 
1476 	dev->current_state = state;
1477 	return 0;
1478 }
1479 
1480 /**
1481  * pci_bus_set_current_state - Walk given bus and set current state of devices
1482  * @bus: Top bus of the subtree to walk.
1483  * @state: state to be set
1484  */
1485 void pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state)
1486 {
1487 	if (bus)
1488 		pci_walk_bus(bus, __pci_dev_set_current_state, &state);
1489 }
1490 
1491 static void __pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state, bool locked)
1492 {
1493 	if (!bus)
1494 		return;
1495 
1496 	if (locked)
1497 		pci_walk_bus_locked(bus, __pci_dev_set_current_state, &state);
1498 	else
1499 		pci_walk_bus(bus, __pci_dev_set_current_state, &state);
1500 }
1501 
1502 /**
1503  * pci_set_low_power_state - Put a PCI device into a low-power state.
1504  * @dev: PCI device to handle.
1505  * @state: PCI power state (D1, D2, D3hot) to put the device into.
1506  * @locked: whether pci_bus_sem is held
1507  *
1508  * Use the device's PCI_PM_CTRL register to put it into a low-power state.
1509  *
1510  * RETURN VALUE:
1511  * -EINVAL if the requested state is invalid.
1512  * -EIO if device does not support PCI PM or its PM capabilities register has a
1513  * wrong version, or device doesn't support the requested state.
1514  * 0 if device already is in the requested state.
1515  * 0 if device's power state has been successfully changed.
1516  */
1517 static int pci_set_low_power_state(struct pci_dev *dev, pci_power_t state, bool locked)
1518 {
1519 	u16 pmcsr;
1520 
1521 	if (!dev->pm_cap)
1522 		return -EIO;
1523 
1524 	/*
1525 	 * Validate transition: We can enter D0 from any state, but if
1526 	 * we're already in a low-power state, we can only go deeper.  E.g.,
1527 	 * we can go from D1 to D3, but we can't go directly from D3 to D1;
1528 	 * we'd have to go from D3 to D0, then to D1.
1529 	 */
1530 	if (dev->current_state <= PCI_D3cold && dev->current_state > state) {
1531 		pci_dbg(dev, "Invalid power transition (from %s to %s)\n",
1532 			pci_power_name(dev->current_state),
1533 			pci_power_name(state));
1534 		return -EINVAL;
1535 	}
1536 
1537 	/* Check if this device supports the desired state */
1538 	if ((state == PCI_D1 && !dev->d1_support)
1539 	   || (state == PCI_D2 && !dev->d2_support))
1540 		return -EIO;
1541 
1542 	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
1543 	if (PCI_POSSIBLE_ERROR(pmcsr)) {
1544 		pci_err(dev, "Unable to change power state from %s to %s, device inaccessible\n",
1545 			pci_power_name(dev->current_state),
1546 			pci_power_name(state));
1547 		dev->current_state = PCI_D3cold;
1548 		return -EIO;
1549 	}
1550 
1551 	pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
1552 	pmcsr |= state;
1553 
1554 	/* Enter specified state */
1555 	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
1556 
1557 	/* Mandatory power management transition delays; see PCI PM 1.2. */
1558 	if (state == PCI_D3hot)
1559 		pci_dev_d3_sleep(dev);
1560 	else if (state == PCI_D2)
1561 		udelay(PCI_PM_D2_DELAY);
1562 
1563 	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
1564 	dev->current_state = pmcsr & PCI_PM_CTRL_STATE_MASK;
1565 	if (dev->current_state != state)
1566 		pci_info_ratelimited(dev, "Refused to change power state from %s to %s\n",
1567 				     pci_power_name(dev->current_state),
1568 				     pci_power_name(state));
1569 
1570 	if (dev->bus->self)
1571 		pcie_aspm_pm_state_change(dev->bus->self, locked);
1572 
1573 	return 0;
1574 }
1575 
1576 static int __pci_set_power_state(struct pci_dev *dev, pci_power_t state, bool locked)
1577 {
1578 	int error;
1579 
1580 	/* Bound the state we're entering */
1581 	if (state > PCI_D3cold)
1582 		state = PCI_D3cold;
1583 	else if (state < PCI_D0)
1584 		state = PCI_D0;
1585 	else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))
1586 
1587 		/*
1588 		 * If the device or the parent bridge does not support PCI
1589 		 * PM, ignore the request if we're doing anything other
1590 		 * than putting it into D0 (which would only happen on
1591 		 * boot).
1592 		 */
1593 		return 0;
1594 
1595 	/* Check if we're already there */
1596 	if (dev->current_state == state)
1597 		return 0;
1598 
1599 	if (state == PCI_D0)
1600 		return pci_set_full_power_state(dev, locked);
1601 
1602 	/*
1603 	 * This device is quirked not to be put into D3, so don't put it in
1604 	 * D3
1605 	 */
1606 	if (state >= PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
1607 		return 0;
1608 
1609 	if (state == PCI_D3cold) {
1610 		/*
1611 		 * To put the device in D3cold, put it into D3hot in the native
1612 		 * way, then put it into D3cold using platform ops.
1613 		 */
1614 		error = pci_set_low_power_state(dev, PCI_D3hot, locked);
1615 
1616 		if (pci_platform_power_transition(dev, PCI_D3cold))
1617 			return error;
1618 
1619 		/* Powering off a bridge may power off the whole hierarchy */
1620 		if (dev->current_state == PCI_D3cold)
1621 			__pci_bus_set_current_state(dev->subordinate, PCI_D3cold, locked);
1622 	} else {
1623 		error = pci_set_low_power_state(dev, state, locked);
1624 
1625 		if (pci_platform_power_transition(dev, state))
1626 			return error;
1627 	}
1628 
1629 	return 0;
1630 }
1631 
1632 /**
1633  * pci_set_power_state - Set the power state of a PCI device
1634  * @dev: PCI device to handle.
1635  * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
1636  *
1637  * Transition a device to a new power state, using the platform firmware and/or
1638  * the device's PCI PM registers.
1639  *
1640  * RETURN VALUE:
1641  * -EINVAL if the requested state is invalid.
1642  * -EIO if device does not support PCI PM or its PM capabilities register has a
1643  * wrong version, or device doesn't support the requested state.
1644  * 0 if the transition is to D1 or D2 but D1 and D2 are not supported.
1645  * 0 if device already is in the requested state.
1646  * 0 if the transition is to D3 but D3 is not supported.
1647  * 0 if device's power state has been successfully changed.
1648  */
1649 int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
1650 {
1651 	return __pci_set_power_state(dev, state, false);
1652 }
1653 EXPORT_SYMBOL(pci_set_power_state);
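
/*
 * Usage sketch (illustrative, not part of this file): putting a device
 * into D3hot from a driver suspend path and tolerating devices without
 * PCI PM support:
 *
 *	int error = pci_set_power_state(pdev, PCI_D3hot);
 *
 *	if (error == -EIO)
 *		pci_dbg(pdev, "no PCI PM support, staying in D0\n");
 */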
1654 
1655 int pci_set_power_state_locked(struct pci_dev *dev, pci_power_t state)
1656 {
1657 	lockdep_assert_held(&pci_bus_sem);
1658 
1659 	return __pci_set_power_state(dev, state, true);
1660 }
1661 EXPORT_SYMBOL(pci_set_power_state_locked);
1662 
1663 #define PCI_EXP_SAVE_REGS	7
1664 
1665 static struct pci_cap_saved_state *_pci_find_saved_cap(struct pci_dev *pci_dev,
1666 						       u16 cap, bool extended)
1667 {
1668 	struct pci_cap_saved_state *tmp;
1669 
1670 	hlist_for_each_entry(tmp, &pci_dev->saved_cap_space, next) {
1671 		if (tmp->cap.cap_extended == extended && tmp->cap.cap_nr == cap)
1672 			return tmp;
1673 	}
1674 	return NULL;
1675 }
1676 
1677 struct pci_cap_saved_state *pci_find_saved_cap(struct pci_dev *dev, char cap)
1678 {
1679 	return _pci_find_saved_cap(dev, cap, false);
1680 }
1681 
1682 struct pci_cap_saved_state *pci_find_saved_ext_cap(struct pci_dev *dev, u16 cap)
1683 {
1684 	return _pci_find_saved_cap(dev, cap, true);
1685 }
1686 
1687 static int pci_save_pcie_state(struct pci_dev *dev)
1688 {
1689 	int i = 0;
1690 	struct pci_cap_saved_state *save_state;
1691 	u16 *cap;
1692 
1693 	if (!pci_is_pcie(dev))
1694 		return 0;
1695 
1696 	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
1697 	if (!save_state) {
1698 		pci_err(dev, "buffer not found in %s\n", __func__);
1699 		return -ENOMEM;
1700 	}
1701 
1702 	cap = (u16 *)&save_state->cap.data[0];
1703 	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &cap[i++]);
1704 	pcie_capability_read_word(dev, PCI_EXP_LNKCTL, &cap[i++]);
1705 	pcie_capability_read_word(dev, PCI_EXP_SLTCTL, &cap[i++]);
1706 	pcie_capability_read_word(dev, PCI_EXP_RTCTL,  &cap[i++]);
1707 	pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &cap[i++]);
1708 	pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &cap[i++]);
1709 	pcie_capability_read_word(dev, PCI_EXP_SLTCTL2, &cap[i++]);
1710 
1711 	pci_save_aspm_l1ss_state(dev);
1712 	pci_save_ltr_state(dev);
1713 
1714 	return 0;
1715 }
1716 
1717 static void pci_restore_pcie_state(struct pci_dev *dev)
1718 {
1719 	int i = 0;
1720 	struct pci_cap_saved_state *save_state;
1721 	u16 *cap;
1722 
1723 	/*
1724 	 * Restore max latencies (in the LTR capability) before enabling
1725 	 * LTR itself in PCI_EXP_DEVCTL2.
1726 	 */
1727 	pci_restore_ltr_state(dev);
1728 	pci_restore_aspm_l1ss_state(dev);
1729 
1730 	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
1731 	if (!save_state)
1732 		return;
1733 
1734 	/*
1735 	 * Downstream ports reset the LTR enable bit when the link goes down.
1736 	 * Check and re-configure the bit here before restoring the device.
1737 	 * PCIe r5.0, sec 7.5.3.16.
1738 	 */
1739 	pci_bridge_reconfigure_ltr(dev);
1740 
1741 	cap = (u16 *)&save_state->cap.data[0];
1742 	pcie_capability_write_word(dev, PCI_EXP_DEVCTL, cap[i++]);
1743 	pcie_capability_write_word(dev, PCI_EXP_LNKCTL, cap[i++]);
1744 	pcie_capability_write_word(dev, PCI_EXP_SLTCTL, cap[i++]);
1745 	pcie_capability_write_word(dev, PCI_EXP_RTCTL, cap[i++]);
1746 	pcie_capability_write_word(dev, PCI_EXP_DEVCTL2, cap[i++]);
1747 	pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, cap[i++]);
1748 	pcie_capability_write_word(dev, PCI_EXP_SLTCTL2, cap[i++]);
1749 }
1750 
1751 static int pci_save_pcix_state(struct pci_dev *dev)
1752 {
1753 	int pos;
1754 	struct pci_cap_saved_state *save_state;
1755 
1756 	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
1757 	if (!pos)
1758 		return 0;
1759 
1760 	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
1761 	if (!save_state) {
1762 		pci_err(dev, "buffer not found in %s\n", __func__);
1763 		return -ENOMEM;
1764 	}
1765 
1766 	pci_read_config_word(dev, pos + PCI_X_CMD,
1767 			     (u16 *)save_state->cap.data);
1768 
1769 	return 0;
1770 }
1771 
1772 static void pci_restore_pcix_state(struct pci_dev *dev)
1773 {
1774 	int i = 0, pos;
1775 	struct pci_cap_saved_state *save_state;
1776 	u16 *cap;
1777 
1778 	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
1779 	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
1780 	if (!save_state || !pos)
1781 		return;
1782 	cap = (u16 *)&save_state->cap.data[0];
1783 
1784 	pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
1785 }
1786 
1787 /**
1788  * pci_save_state - save the PCI configuration space of a device before
1789  *		    suspending
1790  * @dev: PCI device that we're dealing with
1791  */
1792 int pci_save_state(struct pci_dev *dev)
1793 {
1794 	int i;
1795 	/* XXX: 100% dword access ok here? */
1796 	for (i = 0; i < 16; i++) {
1797 		pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
1798 		pci_dbg(dev, "save config %#04x: %#010x\n",
1799 			i * 4, dev->saved_config_space[i]);
1800 	}
1801 	dev->state_saved = true;
1802 
1803 	i = pci_save_pcie_state(dev);
1804 	if (i != 0)
1805 		return i;
1806 
1807 	i = pci_save_pcix_state(dev);
1808 	if (i != 0)
1809 		return i;
1810 
1811 	pci_save_dpc_state(dev);
1812 	pci_save_aer_state(dev);
1813 	pci_save_ptm_state(dev);
1814 	pci_save_tph_state(dev);
1815 	return pci_save_vc_state(dev);
1816 }
1817 EXPORT_SYMBOL(pci_save_state);
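
/*
 * Usage sketch (illustrative, not part of this file): legacy suspend and
 * resume paths typically bracket the low-power transition like this:
 *
 *	suspend:
 *		pci_save_state(pdev);
 *		pci_set_power_state(pdev, PCI_D3hot);
 *
 *	resume:
 *		pci_set_power_state(pdev, PCI_D0);
 *		pci_restore_state(pdev);
 */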
1818 
1819 static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
1820 				     u32 saved_val, int retry, bool force)
1821 {
1822 	u32 val;
1823 
1824 	pci_read_config_dword(pdev, offset, &val);
1825 	if (!force && val == saved_val)
1826 		return;
1827 
1828 	for (;;) {
1829 		pci_dbg(pdev, "restore config %#04x: %#010x -> %#010x\n",
1830 			offset, val, saved_val);
1831 		pci_write_config_dword(pdev, offset, saved_val);
1832 		if (retry-- <= 0)
1833 			return;
1834 
1835 		pci_read_config_dword(pdev, offset, &val);
1836 		if (val == saved_val)
1837 			return;
1838 
1839 		mdelay(1);
1840 	}
1841 }
1842 
1843 static void pci_restore_config_space_range(struct pci_dev *pdev,
1844 					   int start, int end, int retry,
1845 					   bool force)
1846 {
1847 	int index;
1848 
1849 	for (index = end; index >= start; index--)
1850 		pci_restore_config_dword(pdev, 4 * index,
1851 					 pdev->saved_config_space[index],
1852 					 retry, force);
1853 }
1854 
1855 static void pci_restore_config_space(struct pci_dev *pdev)
1856 {
1857 	if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
1858 		pci_restore_config_space_range(pdev, 10, 15, 0, false);
1859 		/* Restore BARs before the command register. */
1860 		pci_restore_config_space_range(pdev, 4, 9, 10, false);
1861 		pci_restore_config_space_range(pdev, 0, 3, 0, false);
1862 	} else if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
1863 		pci_restore_config_space_range(pdev, 12, 15, 0, false);
1864 
1865 		/*
1866 		 * Force rewriting of prefetch registers to avoid S3 resume
1867 		 * issues on Intel PCI bridges that occur when these
1868 		 * registers are not explicitly written.
1869 		 */
1870 		pci_restore_config_space_range(pdev, 9, 11, 0, true);
1871 		pci_restore_config_space_range(pdev, 0, 8, 0, false);
1872 	} else {
1873 		pci_restore_config_space_range(pdev, 0, 15, 0, false);
1874 	}
1875 }
1876 
1877 static void pci_restore_rebar_state(struct pci_dev *pdev)
1878 {
1879 	unsigned int pos, nbars, i;
1880 	u32 ctrl;
1881 
1882 	pos = pdev->rebar_cap;
1883 	if (!pos)
1884 		return;
1885 
1886 	pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
1887 	nbars = FIELD_GET(PCI_REBAR_CTRL_NBAR_MASK, ctrl);
1888 
1889 	for (i = 0; i < nbars; i++, pos += 8) {
1890 		struct resource *res;
1891 		int bar_idx, size;
1892 
1893 		pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
1894 		bar_idx = ctrl & PCI_REBAR_CTRL_BAR_IDX;
1895 		res = pci_resource_n(pdev, bar_idx);
1896 		size = pci_rebar_bytes_to_size(resource_size(res));
1897 		ctrl &= ~PCI_REBAR_CTRL_BAR_SIZE;
1898 		ctrl |= FIELD_PREP(PCI_REBAR_CTRL_BAR_SIZE, size);
1899 		pci_write_config_dword(pdev, pos + PCI_REBAR_CTRL, ctrl);
1900 	}
1901 }
1902 
1903 /**
1904  * pci_restore_state - Restore the saved state of a PCI device
1905  * @dev: PCI device that we're dealing with
1906  */
1907 void pci_restore_state(struct pci_dev *dev)
1908 {
1909 	if (!dev->state_saved)
1910 		return;
1911 
1912 	pci_restore_pcie_state(dev);
1913 	pci_restore_pasid_state(dev);
1914 	pci_restore_pri_state(dev);
1915 	pci_restore_ats_state(dev);
1916 	pci_restore_vc_state(dev);
1917 	pci_restore_rebar_state(dev);
1918 	pci_restore_dpc_state(dev);
1919 	pci_restore_ptm_state(dev);
1920 	pci_restore_tph_state(dev);
1921 
1922 	pci_aer_clear_status(dev);
1923 	pci_restore_aer_state(dev);
1924 
1925 	pci_restore_config_space(dev);
1926 
1927 	pci_restore_pcix_state(dev);
1928 	pci_restore_msi_state(dev);
1929 
1930 	/* Restore ACS and IOV configuration state */
1931 	pci_enable_acs(dev);
1932 	pci_restore_iov_state(dev);
1933 
1934 	dev->state_saved = false;
1935 }
1936 EXPORT_SYMBOL(pci_restore_state);
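
/*
 * Illustrative sketch (not from this file; hypothetical "foo" driver using
 * the legacy PCI PM callbacks): pci_save_state() and pci_restore_state()
 * are typically paired across a suspend/resume cycle like this:
 *
 *	static int foo_suspend(struct pci_dev *pdev, pm_message_t state)
 *	{
 *		pci_save_state(pdev);
 *		return pci_set_power_state(pdev, pci_choose_state(pdev, state));
 *	}
 *
 *	static int foo_resume(struct pci_dev *pdev)
 *	{
 *		pci_set_power_state(pdev, PCI_D0);
 *		pci_restore_state(pdev);
 *		return 0;
 *	}
 *
 * Because pci_restore_state() returns early unless dev->state_saved is set,
 * the save must precede the restore.
 */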
1937 
1938 struct pci_saved_state {
1939 	u32 config_space[16];
1940 	struct pci_cap_saved_data cap[];
1941 };
1942 
1943 /**
1944  * pci_store_saved_state - Allocate and return an opaque struct containing
1945  *			   the device saved state.
1946  * @dev: PCI device that we're dealing with
1947  *
1948  * Return NULL if no state was saved or an error occurs.
1949  */
1950 struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev)
1951 {
1952 	struct pci_saved_state *state;
1953 	struct pci_cap_saved_state *tmp;
1954 	struct pci_cap_saved_data *cap;
1955 	size_t size;
1956 
1957 	if (!dev->state_saved)
1958 		return NULL;
1959 
1960 	size = sizeof(*state) + sizeof(struct pci_cap_saved_data);
1961 
1962 	hlist_for_each_entry(tmp, &dev->saved_cap_space, next)
1963 		size += sizeof(struct pci_cap_saved_data) + tmp->cap.size;
1964 
1965 	state = kzalloc(size, GFP_KERNEL);
1966 	if (!state)
1967 		return NULL;
1968 
1969 	memcpy(state->config_space, dev->saved_config_space,
1970 	       sizeof(state->config_space));
1971 
1972 	cap = state->cap;
1973 	hlist_for_each_entry(tmp, &dev->saved_cap_space, next) {
1974 		size_t len = sizeof(struct pci_cap_saved_data) + tmp->cap.size;
1975 		memcpy(cap, &tmp->cap, len);
1976 		cap = (struct pci_cap_saved_data *)((u8 *)cap + len);
1977 	}
1978 	/* Empty cap_save terminates list */
1979 
1980 	return state;
1981 }
1982 EXPORT_SYMBOL_GPL(pci_store_saved_state);
1983 
1984 /**
1985  * pci_load_saved_state - Reload the provided save state into struct pci_dev.
1986  * @dev: PCI device that we're dealing with
1987  * @state: Saved state returned from pci_store_saved_state()
1988  */
1989 int pci_load_saved_state(struct pci_dev *dev,
1990 			 struct pci_saved_state *state)
1991 {
1992 	struct pci_cap_saved_data *cap;
1993 
1994 	dev->state_saved = false;
1995 
1996 	if (!state)
1997 		return 0;
1998 
1999 	memcpy(dev->saved_config_space, state->config_space,
2000 	       sizeof(state->config_space));
2001 
2002 	cap = state->cap;
2003 	while (cap->size) {
2004 		struct pci_cap_saved_state *tmp;
2005 
2006 		tmp = _pci_find_saved_cap(dev, cap->cap_nr, cap->cap_extended);
2007 		if (!tmp || tmp->cap.size != cap->size)
2008 			return -EINVAL;
2009 
2010 		memcpy(tmp->cap.data, cap->data, tmp->cap.size);
2011 		cap = (struct pci_cap_saved_data *)((u8 *)cap +
2012 		       sizeof(struct pci_cap_saved_data) + cap->size);
2013 	}
2014 
2015 	dev->state_saved = true;
2016 	return 0;
2017 }
2018 EXPORT_SYMBOL_GPL(pci_load_saved_state);
2019 
2020 /**
2021  * pci_load_and_free_saved_state - Reload the saved state pointed to by state,
2022  *				   and free the memory allocated for it.
2023  * @dev: PCI device that we're dealing with
2024  * @state: Pointer to saved state returned from pci_store_saved_state()
2025  */
2026 int pci_load_and_free_saved_state(struct pci_dev *dev,
2027 				  struct pci_saved_state **state)
2028 {
2029 	int ret = pci_load_saved_state(dev, *state);
2030 	kfree(*state);
2031 	*state = NULL;
2032 	return ret;
2033 }
2034 EXPORT_SYMBOL_GPL(pci_load_and_free_saved_state);
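
/*
 * Illustrative sketch (assumptions: "pdev" is a valid, bound device and the
 * elided operation clobbers config space, e.g. a reset): callers such as
 * VFIO-style drivers can snapshot the saved state and reapply it later:
 *
 *	struct pci_saved_state *state;
 *
 *	pci_save_state(pdev);
 *	state = pci_store_saved_state(pdev);
 *	...operation that destroys config space...
 *	pci_load_and_free_saved_state(pdev, &state);
 *	pci_restore_state(pdev);
 *
 * pci_load_and_free_saved_state() frees and NULLs *state even on failure,
 * so the caller is never left holding a stale pointer.
 */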
2035 
2036 int __weak pcibios_enable_device(struct pci_dev *dev, int bars)
2037 {
2038 	return pci_enable_resources(dev, bars);
2039 }
2040 
2041 static int pci_host_bridge_enable_device(struct pci_dev *dev)
2042 {
2043 	struct pci_host_bridge *host_bridge = pci_find_host_bridge(dev->bus);
2044 	int err;
2045 
2046 	if (host_bridge && host_bridge->enable_device) {
2047 		err = host_bridge->enable_device(host_bridge, dev);
2048 		if (err)
2049 			return err;
2050 	}
2051 
2052 	return 0;
2053 }
2054 
2055 static void pci_host_bridge_disable_device(struct pci_dev *dev)
2056 {
2057 	struct pci_host_bridge *host_bridge = pci_find_host_bridge(dev->bus);
2058 
2059 	if (host_bridge && host_bridge->disable_device)
2060 		host_bridge->disable_device(host_bridge, dev);
2061 }
2062 
2063 static int do_pci_enable_device(struct pci_dev *dev, int bars)
2064 {
2065 	int err;
2066 	struct pci_dev *bridge;
2067 	u16 cmd;
2068 	u8 pin;
2069 
2070 	err = pci_set_power_state(dev, PCI_D0);
2071 	if (err < 0 && err != -EIO)
2072 		return err;
2073 
2074 	bridge = pci_upstream_bridge(dev);
2075 	if (bridge)
2076 		pcie_aspm_powersave_config_link(bridge);
2077 
2078 	err = pci_host_bridge_enable_device(dev);
2079 	if (err)
2080 		return err;
2081 
2082 	err = pcibios_enable_device(dev, bars);
2083 	if (err < 0)
2084 		goto err_enable;
2085 	pci_fixup_device(pci_fixup_enable, dev);
2086 
2087 	if (dev->msi_enabled || dev->msix_enabled)
2088 		return 0;
2089 
2090 	pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
2091 	if (pin) {
2092 		pci_read_config_word(dev, PCI_COMMAND, &cmd);
2093 		if (cmd & PCI_COMMAND_INTX_DISABLE)
2094 			pci_write_config_word(dev, PCI_COMMAND,
2095 					      cmd & ~PCI_COMMAND_INTX_DISABLE);
2096 	}
2097 
2098 	return 0;
2099 
2100 err_enable:
2101 	pci_host_bridge_disable_device(dev);
2102 
2103 	return err;
2105 }
2106 
2107 /**
2108  * pci_reenable_device - Resume abandoned device
2109  * @dev: PCI device to be resumed
2110  *
2111  * NOTE: This function is a backend of pci_default_resume() and is not supposed
2112  * to be called by normal code; write a proper resume handler and use that instead.
2113  */
2114 int pci_reenable_device(struct pci_dev *dev)
2115 {
2116 	if (pci_is_enabled(dev))
2117 		return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1);
2118 	return 0;
2119 }
2120 EXPORT_SYMBOL(pci_reenable_device);
2121 
2122 static void pci_enable_bridge(struct pci_dev *dev)
2123 {
2124 	struct pci_dev *bridge;
2125 	int retval;
2126 
2127 	bridge = pci_upstream_bridge(dev);
2128 	if (bridge)
2129 		pci_enable_bridge(bridge);
2130 
2131 	if (pci_is_enabled(dev)) {
2132 		if (!dev->is_busmaster)
2133 			pci_set_master(dev);
2134 		return;
2135 	}
2136 
2137 	retval = pci_enable_device(dev);
2138 	if (retval)
2139 		pci_err(dev, "Error enabling bridge (%d), continuing\n",
2140 			retval);
2141 	pci_set_master(dev);
2142 }
2143 
2144 static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags)
2145 {
2146 	struct pci_dev *bridge;
2147 	int err;
2148 	int i, bars = 0;
2149 
2150 	/*
2151 	 * Power state could be unknown at this point, either due to a fresh
2152 	 * boot or a device removal call.  So get the current power state
2153 	 * so that things like MSI message writing will behave as expected
2154 	 * (e.g. if the device really is in D0 at enable time).
2155 	 */
2156 	pci_update_current_state(dev, dev->current_state);
2157 
2158 	if (atomic_inc_return(&dev->enable_cnt) > 1)
2159 		return 0;		/* already enabled */
2160 
2161 	bridge = pci_upstream_bridge(dev);
2162 	if (bridge)
2163 		pci_enable_bridge(bridge);
2164 
2165 	/* Collect the BARs to enable; only SR-IOV resources are skipped */
2166 	for (i = 0; i <= PCI_ROM_RESOURCE; i++)
2167 		if (dev->resource[i].flags & flags)
2168 			bars |= (1 << i);
2169 	for (i = PCI_BRIDGE_RESOURCES; i < DEVICE_COUNT_RESOURCE; i++)
2170 		if (dev->resource[i].flags & flags)
2171 			bars |= (1 << i);
2172 
2173 	err = do_pci_enable_device(dev, bars);
2174 	if (err < 0)
2175 		atomic_dec(&dev->enable_cnt);
2176 	return err;
2177 }
2178 
2179 /**
2180  * pci_enable_device_mem - Initialize a device for use with Memory space
2181  * @dev: PCI device to be initialized
2182  *
2183  * Initialize device before it's used by a driver. Ask low-level code
2184  * to enable Memory resources. Wake up the device if it was suspended.
2185  * Beware, this function can fail.
2186  */
2187 int pci_enable_device_mem(struct pci_dev *dev)
2188 {
2189 	return pci_enable_device_flags(dev, IORESOURCE_MEM);
2190 }
2191 EXPORT_SYMBOL(pci_enable_device_mem);
2192 
2193 /**
2194  * pci_enable_device - Initialize device before it's used by a driver.
2195  * @dev: PCI device to be initialized
2196  *
2197  * Initialize device before it's used by a driver. Ask low-level code
2198  * to enable I/O and memory. Wake up the device if it was suspended.
2199  * Beware, this function can fail.
2200  *
2201  * Note we don't actually enable the device many times if we call
2202  * this function repeatedly (we just increment the count).
2203  */
2204 int pci_enable_device(struct pci_dev *dev)
2205 {
2206 	return pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO);
2207 }
2208 EXPORT_SYMBOL(pci_enable_device);
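
/*
 * Illustrative sketch (hypothetical "foo" driver): a typical probe path
 * enables the device before touching any of its resources and unwinds on
 * failure:
 *
 *	static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 *	{
 *		int err;
 *
 *		err = pci_enable_device(pdev);
 *		if (err)
 *			return err;
 *
 *		err = pci_request_regions(pdev, "foo");
 *		if (err)
 *			goto err_disable;
 *
 *		pci_set_master(pdev);
 *		return 0;
 *
 *	err_disable:
 *		pci_disable_device(pdev);
 *		return err;
 *	}
 */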
2209 
2210 /**
2211  * pcibios_device_add - provide arch specific hooks when adding device dev
2212  * @dev: the PCI device being added
2213  *
2214  * Permits the platform to provide architecture specific functionality when
2215  * devices are added. This is the default implementation. Architecture
2216  * implementations can override this.
2217  */
2218 int __weak pcibios_device_add(struct pci_dev *dev)
2219 {
2220 	return 0;
2221 }
2222 
2223 /**
2224  * pcibios_release_device - provide arch specific hooks when releasing
2225  *			    device dev
2226  * @dev: the PCI device being released
2227  *
2228  * Permits the platform to provide architecture specific functionality when
2229  * devices are released. This is the default implementation. Architecture
2230  * implementations can override this.
2231  */
2232 void __weak pcibios_release_device(struct pci_dev *dev) {}
2233 
2234 /**
2235  * pcibios_disable_device - disable arch specific PCI resources for device dev
2236  * @dev: the PCI device to disable
2237  *
2238  * Disables architecture specific PCI resources for the device. This
2239  * is the default implementation. Architecture implementations can
2240  * override this.
2241  */
2242 void __weak pcibios_disable_device(struct pci_dev *dev) {}
2243 
2244 static void do_pci_disable_device(struct pci_dev *dev)
2245 {
2246 	u16 pci_command;
2247 
2248 	pci_read_config_word(dev, PCI_COMMAND, &pci_command);
2249 	if (pci_command & PCI_COMMAND_MASTER) {
2250 		pci_command &= ~PCI_COMMAND_MASTER;
2251 		pci_write_config_word(dev, PCI_COMMAND, pci_command);
2252 	}
2253 
2254 	pcibios_disable_device(dev);
2255 }
2256 
2257 /**
2258  * pci_disable_enabled_device - Disable device without updating enable_cnt
2259  * @dev: PCI device to disable
2260  *
2261  * NOTE: This function is a backend of PCI power management routines and is
2262  * not supposed to be called by drivers.
2263  */
2264 void pci_disable_enabled_device(struct pci_dev *dev)
2265 {
2266 	if (pci_is_enabled(dev))
2267 		do_pci_disable_device(dev);
2268 }
2269 
2270 /**
2271  * pci_disable_device - Disable PCI device after use
2272  * @dev: PCI device to be disabled
2273  *
2274  * Signal to the system that the PCI device is no longer in use.  This only
2275  * involves disabling PCI bus-mastering, if active.
2276  *
2277  * Note we don't actually disable the device until all callers of
2278  * pci_enable_device() have called pci_disable_device().
2279  */
2280 void pci_disable_device(struct pci_dev *dev)
2281 {
2282 	dev_WARN_ONCE(&dev->dev, atomic_read(&dev->enable_cnt) <= 0,
2283 		      "disabling already-disabled device");
2284 
2285 	if (atomic_dec_return(&dev->enable_cnt) != 0)
2286 		return;
2287 
2288 	pci_host_bridge_disable_device(dev);
2289 
2290 	do_pci_disable_device(dev);
2291 
2292 	dev->is_busmaster = 0;
2293 }
2294 EXPORT_SYMBOL(pci_disable_device);
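
/*
 * Illustrative sketch (continuing the hypothetical "foo" driver above):
 * because enable_cnt is reference counted, each successful
 * pci_enable_device() must be balanced by exactly one pci_disable_device(),
 * typically from the remove path:
 *
 *	static void foo_remove(struct pci_dev *pdev)
 *	{
 *		pci_release_regions(pdev);
 *		pci_disable_device(pdev);
 *	}
 */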
2295 
2296 /**
2297  * pcibios_set_pcie_reset_state - set reset state for device dev
2298  * @dev: the PCIe device to reset
2299  * @state: Reset state to enter into
2300  *
2301  * Set the PCIe reset state for the device. This is the default
2302  * implementation. Architecture implementations can override this.
2303  */
2304 int __weak pcibios_set_pcie_reset_state(struct pci_dev *dev,
2305 					enum pcie_reset_state state)
2306 {
2307 	return -EINVAL;
2308 }
2309 
2310 /**
2311  * pci_set_pcie_reset_state - set reset state for device dev
2312  * @dev: the PCIe device to reset
2313  * @state: Reset state to enter into
2314  *
2315  * Sets the PCI reset state for the device.
2316  */
2317 int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
2318 {
2319 	return pcibios_set_pcie_reset_state(dev, state);
2320 }
2321 EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);
2322 
2323 #ifdef CONFIG_PCIEAER
2324 void pcie_clear_device_status(struct pci_dev *dev)
2325 {
2326 	u16 sta;
2327 
2328 	pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &sta);
2329 	pcie_capability_write_word(dev, PCI_EXP_DEVSTA, sta);
2330 }
2331 #endif
2332 
2333 /**
2334  * pcie_clear_root_pme_status - Clear root port PME interrupt status.
2335  * @dev: PCIe root port or event collector.
2336  */
2337 void pcie_clear_root_pme_status(struct pci_dev *dev)
2338 {
2339 	pcie_capability_set_dword(dev, PCI_EXP_RTSTA, PCI_EXP_RTSTA_PME);
2340 }
2341 
2342 /**
2343  * pci_check_pme_status - Check if given device has generated PME.
2344  * @dev: Device to check.
2345  *
2346  * Check the PME status of the device and if set, clear it and clear PME enable
2347  * (if set).  Return 'true' if PME status and PME enable were both set or
2348  * 'false' otherwise.
2349  */
2350 bool pci_check_pme_status(struct pci_dev *dev)
2351 {
2352 	int pmcsr_pos;
2353 	u16 pmcsr;
2354 	bool ret = false;
2355 
2356 	if (!dev->pm_cap)
2357 		return false;
2358 
2359 	pmcsr_pos = dev->pm_cap + PCI_PM_CTRL;
2360 	pci_read_config_word(dev, pmcsr_pos, &pmcsr);
2361 	if (!(pmcsr & PCI_PM_CTRL_PME_STATUS))
2362 		return false;
2363 
2364 	/* Clear PME status. */
2365 	pmcsr |= PCI_PM_CTRL_PME_STATUS;
2366 	if (pmcsr & PCI_PM_CTRL_PME_ENABLE) {
2367 		/* Disable PME to avoid interrupt flood. */
2368 		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
2369 		ret = true;
2370 	}
2371 
2372 	pci_write_config_word(dev, pmcsr_pos, pmcsr);
2373 
2374 	return ret;
2375 }
2376 
2377 /**
2378  * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set.
2379  * @dev: Device to handle.
2380  * @pme_poll_reset: Whether or not to reset the device's pme_poll flag.
2381  *
2382  * Check if @dev has generated PME and queue a resume request for it in that
2383  * case.
2384  */
2385 static int pci_pme_wakeup(struct pci_dev *dev, void *pme_poll_reset)
2386 {
2387 	if (pme_poll_reset && dev->pme_poll)
2388 		dev->pme_poll = false;
2389 
2390 	if (pci_check_pme_status(dev)) {
2391 		pci_wakeup_event(dev);
2392 		pm_request_resume(&dev->dev);
2393 	}
2394 	return 0;
2395 }
2396 
2397 /**
2398  * pci_pme_wakeup_bus - Walk given bus and wake up devices on it, if necessary.
2399  * @bus: Top bus of the subtree to walk.
2400  */
2401 void pci_pme_wakeup_bus(struct pci_bus *bus)
2402 {
2403 	if (bus)
2404 		pci_walk_bus(bus, pci_pme_wakeup, (void *)true);
2405 }
2406 
2408 /**
2409  * pci_pme_capable - check the capability of PCI device to generate PME#
2410  * @dev: PCI device to handle.
2411  * @state: PCI state from which device will issue PME#.
2412  */
2413 bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
2414 {
2415 	if (!dev->pm_cap)
2416 		return false;
2417 
2418 	return !!(dev->pme_support & (1 << state));
2419 }
2420 EXPORT_SYMBOL(pci_pme_capable);
2421 
2422 static void pci_pme_list_scan(struct work_struct *work)
2423 {
2424 	struct pci_pme_device *pme_dev, *n;
2425 
2426 	mutex_lock(&pci_pme_list_mutex);
2427 	list_for_each_entry_safe(pme_dev, n, &pci_pme_list, list) {
2428 		struct pci_dev *pdev = pme_dev->dev;
2429 
2430 		if (pdev->pme_poll) {
2431 			struct pci_dev *bridge = pdev->bus->self;
2432 			struct device *dev = &pdev->dev;
2433 			struct device *bdev = bridge ? &bridge->dev : NULL;
2434 			int bref = 0;
2435 
2436 			/*
2437 			 * If we have a bridge, it should be in an active/D0
2438 			 * state or the configuration space of subordinate
2439 			 * devices may not be accessible or stable over the
2440 			 * course of the call.
2441 			 */
2442 			if (bdev) {
2443 				bref = pm_runtime_get_if_active(bdev);
2444 				if (!bref)
2445 					continue;
2446 
2447 				if (bridge->current_state != PCI_D0)
2448 					goto put_bridge;
2449 			}
2450 
2451 			/*
2452 			 * The device itself should be suspended but config
2453 			 * space must be accessible, therefore it cannot be in
2454 			 * D3cold.
2455 			 */
2456 			if (pm_runtime_suspended(dev) &&
2457 			    pdev->current_state != PCI_D3cold)
2458 				pci_pme_wakeup(pdev, NULL);
2459 
2460 put_bridge:
2461 			if (bref > 0)
2462 				pm_runtime_put(bdev);
2463 		} else {
2464 			list_del(&pme_dev->list);
2465 			kfree(pme_dev);
2466 		}
2467 	}
2468 	if (!list_empty(&pci_pme_list))
2469 		queue_delayed_work(system_freezable_wq, &pci_pme_work,
2470 				   msecs_to_jiffies(PME_TIMEOUT));
2471 	mutex_unlock(&pci_pme_list_mutex);
2472 }
2473 
2474 static void __pci_pme_active(struct pci_dev *dev, bool enable)
2475 {
2476 	u16 pmcsr;
2477 
2478 	if (!dev->pme_support)
2479 		return;
2480 
2481 	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
2482 	/* Clear PME_Status by writing 1 to it and enable PME# */
2483 	pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;
2484 	if (!enable)
2485 		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
2486 
2487 	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
2488 }
2489 
2490 /**
2491  * pci_pme_restore - Restore PME configuration after config space restore.
2492  * @dev: PCI device to update.
2493  */
2494 void pci_pme_restore(struct pci_dev *dev)
2495 {
2496 	u16 pmcsr;
2497 
2498 	if (!dev->pme_support)
2499 		return;
2500 
2501 	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
2502 	if (dev->wakeup_prepared) {
2503 		pmcsr |= PCI_PM_CTRL_PME_ENABLE;
2504 		pmcsr &= ~PCI_PM_CTRL_PME_STATUS;
2505 	} else {
2506 		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
2507 		pmcsr |= PCI_PM_CTRL_PME_STATUS;
2508 	}
2509 	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
2510 }
2511 
2512 /**
2513  * pci_pme_active - enable or disable PCI device's PME# function
2514  * @dev: PCI device to handle.
2515  * @enable: 'true' to enable PME# generation; 'false' to disable it.
2516  *
2517  * The caller must verify that the device is capable of generating PME# before
2518  * calling this function with @enable equal to 'true'.
2519  */
2520 void pci_pme_active(struct pci_dev *dev, bool enable)
2521 {
2522 	__pci_pme_active(dev, enable);
2523 
2524 	/*
2525 	 * PCI (as opposed to PCIe) PME requires that the device have
2526 	 * its PME# line hooked up correctly. Not all hardware vendors
2527 	 * do this, so the PME never gets delivered and the device
2528 	 * remains asleep. The easiest way around this is to
2529 	 * periodically walk the list of suspended devices and check
2530 	 * whether any have their PME flag set. The assumption is that
2531 	 * we'll wake up often enough anyway that this won't be a huge
2532 	 * hit, and the power savings from the devices will still be a
2533 	 * win.
2534 	 *
2535 	 * Although PCIe uses an in-band PME message instead of the PME#
2536 	 * line to report PME, PME does not work for some PCIe devices in
2537 	 * reality.  For example, there are devices that set their PME
2538 	 * status bits, but don't really bother to send a PME message;
2539 	 * there are PCI Express Root Ports that don't bother to
2540 	 * trigger interrupts when they receive PME messages from the
2541 	 * devices below.  So PME poll is used for PCIe devices too.
2542 	 */
2543 
2544 	if (dev->pme_poll) {
2545 		struct pci_pme_device *pme_dev;

2546 		if (enable) {
2547 			pme_dev = kmalloc(sizeof(struct pci_pme_device),
2548 					  GFP_KERNEL);
2549 			if (!pme_dev) {
2550 				pci_warn(dev, "can't enable PME#\n");
2551 				return;
2552 			}
2553 			pme_dev->dev = dev;
2554 			mutex_lock(&pci_pme_list_mutex);
2555 			list_add(&pme_dev->list, &pci_pme_list);
2556 			if (list_is_singular(&pci_pme_list))
2557 				queue_delayed_work(system_freezable_wq,
2558 						   &pci_pme_work,
2559 						   msecs_to_jiffies(PME_TIMEOUT));
2560 			mutex_unlock(&pci_pme_list_mutex);
2561 		} else {
2562 			mutex_lock(&pci_pme_list_mutex);
2563 			list_for_each_entry(pme_dev, &pci_pme_list, list) {
2564 				if (pme_dev->dev == dev) {
2565 					list_del(&pme_dev->list);
2566 					kfree(pme_dev);
2567 					break;
2568 				}
2569 			}
2570 			mutex_unlock(&pci_pme_list_mutex);
2571 		}
2572 	}
2573 
2574 	pci_dbg(dev, "PME# %s\n", enable ? "enabled" : "disabled");
2575 }
2576 EXPORT_SYMBOL(pci_pme_active);
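
/*
 * Illustrative sketch: as required above, callers verify the capability for
 * the target power state before enabling PME# generation:
 *
 *	if (pci_pme_capable(pdev, PCI_D3hot))
 *		pci_pme_active(pdev, true);
 */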
2577 
2578 /**
2579  * __pci_enable_wake - enable PCI device as wakeup event source
2580  * @dev: PCI device affected
2581  * @state: PCI state from which device will issue wakeup events
2582  * @enable: True to enable event generation; false to disable
2583  *
2584  * This enables the device as a wakeup event source, or disables it.
2585  * When such events involves platform-specific hooks, those hooks are
2586  * When such events involve platform-specific hooks, those hooks are
2587  *
2588  * Devices with legacy power management (no standard PCI PM capabilities)
2589  * always require such platform hooks.
2590  *
2591  * RETURN VALUE:
2592  * 0 is returned on success.
2593  * -EINVAL is returned if the device is not supposed to wake up the system.
2594  * A platform-dependent error code is returned if both the platform and the
2595  * native mechanism fail to enable the generation of wake-up events.
2596  */
2597 static int __pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable)
2598 {
2599 	int ret = 0;
2600 
2601 	/*
2602 	 * Bridges that are not power-manageable directly only signal
2603 	 * wakeup on behalf of subordinate devices, which is set up
2604 	 * elsewhere, so skip them. However, bridges that are
2605 	 * power-manageable may signal wakeup for themselves (for example,
2606 	 * on a hotplug event) and they need to be covered here.
2607 	 */
2608 	if (!pci_power_manageable(dev))
2609 		return 0;
2610 
2611 	/* Don't do the same thing twice in a row for one device. */
2612 	if (!!enable == !!dev->wakeup_prepared)
2613 		return 0;
2614 
2615 	/*
2616 	 * According to "PCI System Architecture" 4th ed. by Tom Shanley & Don
2617 	 * Anderson we should be doing PME# wake enable followed by ACPI wake
2618 	 * enable.  To disable wake-up we call the platform first, for symmetry.
2619 	 */
2620 
2621 	if (enable) {
2622 		int error;
2623 
2624 		/*
2625 		 * Enable PME signaling if the device can signal PME from
2626 		 * D3cold regardless of whether or not it can signal PME from
2627 		 * the current target state, because that will allow it to
2628 		 * signal PME when the hierarchy above it goes into D3cold and
2629 		 * the device itself ends up in D3cold as a result of that.
2630 		 */
2631 		if (pci_pme_capable(dev, state) || pci_pme_capable(dev, PCI_D3cold))
2632 			pci_pme_active(dev, true);
2633 		else
2634 			ret = 1;
2635 		error = platform_pci_set_wakeup(dev, true);
2636 		if (ret)
2637 			ret = error;
2638 		if (!ret)
2639 			dev->wakeup_prepared = true;
2640 	} else {
2641 		platform_pci_set_wakeup(dev, false);
2642 		pci_pme_active(dev, false);
2643 		dev->wakeup_prepared = false;
2644 	}
2645 
2646 	return ret;
2647 }
2648 
2649 /**
2650  * pci_enable_wake - change wakeup settings for a PCI device
2651  * @pci_dev: Target device
2652  * @state: PCI state from which device will issue wakeup events
2653  * @enable: Whether or not to enable event generation
2654  *
2655  * If @enable is set, check device_may_wakeup() for the device before calling
2656  * __pci_enable_wake() for it.
2657  */
2658 int pci_enable_wake(struct pci_dev *pci_dev, pci_power_t state, bool enable)
2659 {
2660 	if (enable && !device_may_wakeup(&pci_dev->dev))
2661 		return -EINVAL;
2662 
2663 	return __pci_enable_wake(pci_dev, state, enable);
2664 }
2665 EXPORT_SYMBOL(pci_enable_wake);
2666 
2667 /**
2668  * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold
2669  * @dev: PCI device to prepare
2670  * @enable: True to enable wake-up event generation; false to disable
2671  *
2672  * Many drivers want the device to wake up the system from D3_hot or D3_cold
2673  * and this function allows them to set that up cleanly - pci_enable_wake()
2674  * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI
2675  * ordering constraints.
2676  *
2677  * This function only returns error code if the device is not allowed to wake
2678  * up the system from sleep or it is not capable of generating PME# from both
2679  * D3_hot and D3_cold and the platform is unable to enable wake-up power for it.
2680  */
2681 int pci_wake_from_d3(struct pci_dev *dev, bool enable)
2682 {
2683 	return pci_pme_capable(dev, PCI_D3cold) ?
2684 			pci_enable_wake(dev, PCI_D3cold, enable) :
2685 			pci_enable_wake(dev, PCI_D3hot, enable);
2686 }
2687 EXPORT_SYMBOL(pci_wake_from_d3);
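
/*
 * Illustrative sketch (assumption: "wol_enabled" stands in for the driver's
 * own record of the user's Wake-on-LAN setting): a network driver arms or
 * disarms wake-up from its suspend callback:
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct pci_dev *pdev = to_pci_dev(dev);
 *
 *		...quiesce the hardware...
 *		pci_wake_from_d3(pdev, wol_enabled);
 *		return 0;
 *	}
 */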
2688 
2689 /**
2690  * pci_target_state - find an appropriate low power state for a given PCI dev
2691  * @dev: PCI device
2692  * @wakeup: Whether or not wakeup functionality will be enabled for the device.
2693  *
2694  * Use underlying platform code to find a supported low power state for @dev.
2695  * If the platform can't manage @dev, return the deepest state from which it
2696  * can generate wake events, based on any available PME info.
2697  */
2698 static pci_power_t pci_target_state(struct pci_dev *dev, bool wakeup)
2699 {
2700 	if (platform_pci_power_manageable(dev)) {
2701 		/*
2702 		 * Call the platform to find the target state for the device.
2703 		 */
2704 		pci_power_t state = platform_pci_choose_state(dev);
2705 
2706 		switch (state) {
2707 		case PCI_POWER_ERROR:
2708 		case PCI_UNKNOWN:
2709 			return PCI_D3hot;
2710 
2711 		case PCI_D1:
2712 		case PCI_D2:
2713 			if (pci_no_d1d2(dev))
2714 				return PCI_D3hot;
2715 		}
2716 
2717 		return state;
2718 	}
2719 
2720 	/*
2721 	 * If the device is in D3cold even though it's not power-manageable by
2722 	 * the platform, it may have been powered down by non-standard means.
2723 	 * Best to let it slumber.
2724 	 */
2725 	if (dev->current_state == PCI_D3cold)
2726 		return PCI_D3cold;
2727 	else if (!dev->pm_cap)
2728 		return PCI_D0;
2729 
2730 	if (wakeup && dev->pme_support) {
2731 		pci_power_t state = PCI_D3hot;
2732 
2733 		/*
2734 		 * Find the deepest state from which the device can generate
2735 		 * PME#.
2736 		 */
2737 		while (state && !(dev->pme_support & (1 << state)))
2738 			state--;
2739 
2740 		if (state)
2741 			return state;
2742 		else if (dev->pme_support & 1)
2743 			return PCI_D0;
2744 	}
2745 
2746 	return PCI_D3hot;
2747 }
2748 
2749 /**
2750  * pci_prepare_to_sleep - prepare PCI device for system-wide transition
2751  *			  into a sleep state
2752  * @dev: Device to handle.
2753  *
2754  * Choose the power state appropriate for the device depending on whether
2755  * it can wake up the system and/or is power manageable by the platform
2756  * (PCI_D3hot is the default) and put the device into that state.
2757  */
2758 int pci_prepare_to_sleep(struct pci_dev *dev)
2759 {
2760 	bool wakeup = device_may_wakeup(&dev->dev);
2761 	pci_power_t target_state = pci_target_state(dev, wakeup);
2762 	int error;
2763 
2764 	if (target_state == PCI_POWER_ERROR)
2765 		return -EIO;
2766 
2767 	pci_enable_wake(dev, target_state, wakeup);
2768 
2769 	error = pci_set_power_state(dev, target_state);
2770 
2771 	if (error)
2772 		pci_enable_wake(dev, target_state, false);
2773 
2774 	return error;
2775 }
2776 EXPORT_SYMBOL(pci_prepare_to_sleep);
2777 
2778 /**
2779  * pci_back_from_sleep - turn PCI device on during system-wide transition
2780  *			 into working state
2781  * @dev: Device to handle.
2782  *
2783  * Disable device's system wake-up capability and put it into D0.
2784  */
2785 int pci_back_from_sleep(struct pci_dev *dev)
2786 {
2787 	int ret = pci_set_power_state(dev, PCI_D0);
2788 
2789 	if (ret)
2790 		return ret;
2791 
2792 	pci_enable_wake(dev, PCI_D0, false);
2793 	return 0;
2794 }
2795 EXPORT_SYMBOL(pci_back_from_sleep);
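
/*
 * Illustrative sketch: drivers without device-specific suspend work can let
 * the PCI core choose and enter the sleep state, and undo it on resume:
 *
 *	static int foo_suspend(struct pci_dev *pdev, pm_message_t state)
 *	{
 *		return pci_prepare_to_sleep(pdev);
 *	}
 *
 *	static int foo_resume(struct pci_dev *pdev)
 *	{
 *		return pci_back_from_sleep(pdev);
 *	}
 */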
2796 
2797 /**
2798  * pci_finish_runtime_suspend - Carry out PCI-specific part of runtime suspend.
2799  * @dev: PCI device being suspended.
2800  *
2801  * Prepare @dev to generate wake-up events at run time and put it into a low
2802  * power state.
2803  */
2804 int pci_finish_runtime_suspend(struct pci_dev *dev)
2805 {
2806 	pci_power_t target_state;
2807 	int error;
2808 
2809 	target_state = pci_target_state(dev, device_can_wakeup(&dev->dev));
2810 	if (target_state == PCI_POWER_ERROR)
2811 		return -EIO;
2812 
2813 	__pci_enable_wake(dev, target_state, pci_dev_run_wake(dev));
2814 
2815 	error = pci_set_power_state(dev, target_state);
2816 
2817 	if (error)
2818 		pci_enable_wake(dev, target_state, false);
2819 
2820 	return error;
2821 }
2822 
2823 /**
2824  * pci_dev_run_wake - Check if device can generate run-time wake-up events.
2825  * @dev: Device to check.
2826  *
2827  * Return true if the device itself is capable of generating wake-up events
2828  * (through the platform or using the native PCIe PME) or if the device supports
2829  * PME and one of its upstream bridges can generate wake-up events.
2830  */
2831 bool pci_dev_run_wake(struct pci_dev *dev)
2832 {
2833 	struct pci_bus *bus = dev->bus;
2834 
2835 	if (!dev->pme_support)
2836 		return false;
2837 
2838 	/* PME-capable in principle, but not from the target power state */
2839 	if (!pci_pme_capable(dev, pci_target_state(dev, true)))
2840 		return false;
2841 
2842 	if (device_can_wakeup(&dev->dev))
2843 		return true;
2844 
2845 	while (bus->parent) {
2846 		struct pci_dev *bridge = bus->self;
2847 
2848 		if (device_can_wakeup(&bridge->dev))
2849 			return true;
2850 
2851 		bus = bus->parent;
2852 	}
2853 
2854 	/* We have reached the root bus. */
2855 	if (bus->bridge)
2856 		return device_can_wakeup(bus->bridge);
2857 
2858 	return false;
2859 }
2860 EXPORT_SYMBOL_GPL(pci_dev_run_wake);
2861 
2862 /**
2863  * pci_dev_need_resume - Check if it is necessary to resume the device.
2864  * @pci_dev: Device to check.
2865  *
2866  * Return 'true' if the device is not runtime-suspended or it has to be
2867  * reconfigured due to wakeup settings difference between system and runtime
2868  * reconfigured due to a difference in wakeup settings between system and
2869  * runtime suspend, or its current power state is not suitable for the
2870  * upcoming (system-wide) transition.
2871 bool pci_dev_need_resume(struct pci_dev *pci_dev)
2872 {
2873 	struct device *dev = &pci_dev->dev;
2874 	pci_power_t target_state;
2875 
2876 	if (!pm_runtime_suspended(dev) || platform_pci_need_resume(pci_dev))
2877 		return true;
2878 
2879 	target_state = pci_target_state(pci_dev, device_may_wakeup(dev));
2880 
2881 	/*
2882 	 * If the earlier platform check has not triggered, D3cold is just power
2883 	 * removal on top of D3hot, so no need to resume the device in that
2884 	 * case.
2885 	 */
2886 	return target_state != pci_dev->current_state &&
2887 		target_state != PCI_D3cold &&
2888 		pci_dev->current_state != PCI_D3hot;
2889 }
2890 
2891 /**
2892  * pci_dev_adjust_pme - Adjust PME setting for a suspended device.
2893  * @pci_dev: Device to check.
2894  *
2895  * If the device is suspended and it is not configured for system wakeup,
2896  * disable PME for it to prevent it from waking up the system unnecessarily.
2897  *
2898  * Note that if the device's power state is D3cold and the platform check in
2899  * pci_dev_need_resume() has not triggered, the device's configuration need not
2900  * be changed.
2901  */
2902 void pci_dev_adjust_pme(struct pci_dev *pci_dev)
2903 {
2904 	struct device *dev = &pci_dev->dev;
2905 
2906 	spin_lock_irq(&dev->power.lock);
2907 
2908 	if (pm_runtime_suspended(dev) && !device_may_wakeup(dev) &&
2909 	    pci_dev->current_state < PCI_D3cold)
2910 		__pci_pme_active(pci_dev, false);
2911 
2912 	spin_unlock_irq(&dev->power.lock);
2913 }
2914 
2915 /**
2916  * pci_dev_complete_resume - Finalize resume from system sleep for a device.
2917  * @pci_dev: Device to handle.
2918  *
2919  * If the device is runtime suspended and wakeup-capable, enable PME for it as
2920  * it might have been disabled during the prepare phase of system suspend if
2921  * the device was not configured for system wakeup.
2922  */
2923 void pci_dev_complete_resume(struct pci_dev *pci_dev)
2924 {
2925 	struct device *dev = &pci_dev->dev;
2926 
2927 	if (!pci_dev_run_wake(pci_dev))
2928 		return;
2929 
2930 	spin_lock_irq(&dev->power.lock);
2931 
2932 	if (pm_runtime_suspended(dev) && pci_dev->current_state < PCI_D3cold)
2933 		__pci_pme_active(pci_dev, true);
2934 
2935 	spin_unlock_irq(&dev->power.lock);
2936 }
2937 
2938 /**
2939  * pci_choose_state - Choose the power state of a PCI device.
2940  * @dev: Target PCI device.
2941  * @state: Target state for the whole system.
2942  *
2943  * Returns PCI power state suitable for @dev and @state.
2944  */
2945 pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
2946 {
2947 	if (state.event == PM_EVENT_ON)
2948 		return PCI_D0;
2949 
2950 	return pci_target_state(dev, false);
2951 }
2952 EXPORT_SYMBOL(pci_choose_state);
2953 
2954 void pci_config_pm_runtime_get(struct pci_dev *pdev)
2955 {
2956 	struct device *dev = &pdev->dev;
2957 	struct device *parent = dev->parent;
2958 
2959 	if (parent)
2960 		pm_runtime_get_sync(parent);
2961 	pm_runtime_get_noresume(dev);
2962 	/*
2963 	 * pdev->current_state is set to PCI_D3cold during suspending,
2964 	 * so wait until suspending completes
2965 	 */
2966 	pm_runtime_barrier(dev);
2967 	/*
2968 	 * Only need to resume devices in D3cold, because config
2969 	 * registers are still accessible for devices suspended but
2970 	 * not in D3cold.
2971 	 */
2972 	if (pdev->current_state == PCI_D3cold)
2973 		pm_runtime_resume(dev);
2974 }
2975 
2976 void pci_config_pm_runtime_put(struct pci_dev *pdev)
2977 {
2978 	struct device *dev = &pdev->dev;
2979 	struct device *parent = dev->parent;
2980 
2981 	pm_runtime_put(dev);
2982 	if (parent)
2983 		pm_runtime_put_sync(parent);
2984 }
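
/*
 * Illustrative sketch: code that accesses config space from a context where
 * the device may be runtime-suspended (a sysfs attribute, for instance)
 * brackets the access so the device cannot sit in D3cold meanwhile:
 *
 *	u16 vendor;
 *
 *	pci_config_pm_runtime_get(pdev);
 *	pci_read_config_word(pdev, PCI_VENDOR_ID, &vendor);
 *	pci_config_pm_runtime_put(pdev);
 */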
2985 
2986 static const struct dmi_system_id bridge_d3_blacklist[] = {
2987 #ifdef CONFIG_X86
2988 	{
2989 		/*
2990 		 * The Gigabyte X299 root port is not marked as hotplug capable,
2991 		 * which allows Linux to power manage it.  However, this
2992 		 * confuses the BIOS SMI handler so don't power manage root
2993 		 * ports on that system.
2994 		 */
2995 		.ident = "X299 DESIGNARE EX-CF",
2996 		.matches = {
2997 			DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
2998 			DMI_MATCH(DMI_BOARD_NAME, "X299 DESIGNARE EX-CF"),
2999 		},
3000 	},
3001 	{
3002 		/*
3003 		 * Downstream device is not accessible after putting a root port
3004 		 * into D3cold and back into D0 on Elo Continental Z2 board
3005 		 */
3006 		.ident = "Elo Continental Z2",
3007 		.matches = {
3008 			DMI_MATCH(DMI_BOARD_VENDOR, "Elo Touch Solutions"),
3009 			DMI_MATCH(DMI_BOARD_NAME, "Geminilake"),
3010 			DMI_MATCH(DMI_BOARD_VERSION, "Continental Z2"),
3011 		},
3012 	},
3013 	{
3014 		/*
3015 		 * Changing the power state of the root port a dGPU is connected to fails:
3016 		 * https://gitlab.freedesktop.org/drm/amd/-/issues/3229
3017 		 */
3018 		.ident = "Hewlett-Packard HP Pavilion 17 Notebook PC/1972",
3019 		.matches = {
3020 			DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
3021 			DMI_MATCH(DMI_BOARD_NAME, "1972"),
3022 			DMI_MATCH(DMI_BOARD_VERSION, "95.33"),
3023 		},
3024 	},
3025 #endif
3026 	{ }
3027 };
3028 
3029 /**
3030  * pci_bridge_d3_possible - Is it possible to put the bridge into D3
3031  * @bridge: Bridge to check
3032  *
3033  * This function checks if it is possible to move the bridge to D3.
3034  * Currently we only allow D3 for some PCIe ports and for Thunderbolt.
3035  */
3036 bool pci_bridge_d3_possible(struct pci_dev *bridge)
3037 {
3038 	if (!pci_is_pcie(bridge))
3039 		return false;
3040 
3041 	switch (pci_pcie_type(bridge)) {
3042 	case PCI_EXP_TYPE_ROOT_PORT:
3043 	case PCI_EXP_TYPE_UPSTREAM:
3044 	case PCI_EXP_TYPE_DOWNSTREAM:
3045 		if (pci_bridge_d3_disable)
3046 			return false;
3047 
3048 		/*
3049 		 * Hotplug ports handled by firmware in System Management Mode
3050 		 * may not be put into D3 by the OS (Thunderbolt on non-Macs).
3051 		 */
3052 		if (bridge->is_hotplug_bridge && !pciehp_is_native(bridge))
3053 			return false;
3054 
3055 		if (pci_bridge_d3_force)
3056 			return true;
3057 
3058 		/* Even the oldest 2010 Thunderbolt controller supports D3. */
3059 		if (bridge->is_thunderbolt)
3060 			return true;
3061 
3062 		/* Platform might know better if the bridge supports D3 */
3063 		if (platform_pci_bridge_d3(bridge))
3064 			return true;
3065 
3066 		/*
3067 		 * Hotplug ports handled natively by the OS were not validated
3068 		 * by vendors for runtime D3 at least until 2018 because there
3069 		 * was no OS support.
3070 		 */
3071 		if (bridge->is_hotplug_bridge)
3072 			return false;
3073 
3074 		if (dmi_check_system(bridge_d3_blacklist))
3075 			return false;
3076 
3077 		/*
3078 		 * Out of caution, we only allow PCIe ports from 2015 or newer
3079 		 * into D3 on x86.
3080 		 */
3081 		if (!IS_ENABLED(CONFIG_X86) || dmi_get_bios_year() >= 2015)
3082 			return true;
3083 		break;
3084 	}
3085 
3086 	return false;
3087 }
3088 
3089 static int pci_dev_check_d3cold(struct pci_dev *dev, void *data)
3090 {
3091 	bool *d3cold_ok = data;
3092 
3093 	if (/* The device needs to be allowed to go D3cold ... */
3094 	    dev->no_d3cold || !dev->d3cold_allowed ||
3095 
3096 	    /* ... and, if wakeup capable, it must be able to wake from D3cold. */
3097 	    (device_may_wakeup(&dev->dev) &&
3098 	     !pci_pme_capable(dev, PCI_D3cold)) ||
3099 
3100 	    /* If it is a bridge it must be allowed to go to D3. */
3101 	    !pci_power_manageable(dev))
3102 
3103 		*d3cold_ok = false;
3104 
3105 	return !*d3cold_ok;
3106 }
3107 
3108 /**
3109  * pci_bridge_d3_update - Update bridge D3 capabilities
3110  * @dev: PCI device which is changed
3111  *
3112  * Update the upstream bridge PM capabilities depending on whether the device
3113  * PM configuration was changed or the device is being removed.  The
3114  * change is also propagated upstream.
3115  */
3116 void pci_bridge_d3_update(struct pci_dev *dev)
3117 {
3118 	bool remove = !device_is_registered(&dev->dev);
3119 	struct pci_dev *bridge;
3120 	bool d3cold_ok = true;
3121 
3122 	bridge = pci_upstream_bridge(dev);
3123 	if (!bridge || !pci_bridge_d3_possible(bridge))
3124 		return;
3125 
3126 	/*
3127 	 * If D3 is currently allowed for the bridge, removing one of its
3128 	 * children won't change that.
3129 	 */
3130 	if (remove && bridge->bridge_d3)
3131 		return;
3132 
3133 	/*
3134 	 * If D3 is currently allowed for the bridge and a child is added or
3135 	 * changed, disallowance of D3 can only be caused by that child, so
3136 	 * we only need to check that single device, not any of its siblings.
3137 	 *
3138 	 * If D3 is currently not allowed for the bridge, checking the device
3139 	 * first may allow us to skip checking its siblings.
3140 	 */
3141 	if (!remove)
3142 		pci_dev_check_d3cold(dev, &d3cold_ok);
3143 
3144 	/*
3145 	 * If D3 is currently not allowed for the bridge, this may be caused
3146 	 * either by the device being changed/removed or any of its siblings,
3147 	 * so we need to go through all children to find out if one of them
3148 	 * continues to block D3.
3149 	 */
3150 	if (d3cold_ok && !bridge->bridge_d3)
3151 		pci_walk_bus(bridge->subordinate, pci_dev_check_d3cold,
3152 			     &d3cold_ok);
3153 
3154 	if (bridge->bridge_d3 != d3cold_ok) {
3155 		bridge->bridge_d3 = d3cold_ok;
3156 		/* Propagate change to upstream bridges */
3157 		pci_bridge_d3_update(bridge);
3158 	}
3159 }
3160 
3161 /**
3162  * pci_d3cold_enable - Enable D3cold for device
3163  * @dev: PCI device to handle
3164  *
3165  * This function can be used in drivers to enable D3cold for the device
3166  * they handle.  It also updates upstream PCI bridge PM capabilities
3167  * accordingly.
3168  */
3169 void pci_d3cold_enable(struct pci_dev *dev)
3170 {
3171 	if (dev->no_d3cold) {
3172 		dev->no_d3cold = false;
3173 		pci_bridge_d3_update(dev);
3174 	}
3175 }
3176 EXPORT_SYMBOL_GPL(pci_d3cold_enable);
3177 
3178 /**
3179  * pci_d3cold_disable - Disable D3cold for device
3180  * @dev: PCI device to handle
3181  *
3182  * This function can be used in drivers to disable D3cold for the device
3183  * they handle.  It also updates upstream PCI bridge PM capabilities
3184  * accordingly.
3185  */
3186 void pci_d3cold_disable(struct pci_dev *dev)
3187 {
3188 	if (!dev->no_d3cold) {
3189 		dev->no_d3cold = true;
3190 		pci_bridge_d3_update(dev);
3191 	}
3192 }
3193 EXPORT_SYMBOL_GPL(pci_d3cold_disable);
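
/*
 * Illustrative sketch (assumption: "foo_has_d3cold_bug()" stands in for
 * whatever erratum detection a driver actually performs): a driver whose
 * hardware loses critical context in D3cold can veto that state at probe
 * time:
 *
 *	if (foo_has_d3cold_bug(pdev))
 *		pci_d3cold_disable(pdev);
 */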
3194 
3195 /**
3196  * pci_pm_init - Initialize PM functions of given PCI device
3197  * @dev: PCI device to handle.
3198  */
3199 void pci_pm_init(struct pci_dev *dev)
3200 {
3201 	int pm;
3202 	u16 status;
3203 	u16 pmc;
3204 
3205 	pm_runtime_forbid(&dev->dev);
3206 	pm_runtime_set_active(&dev->dev);
3207 	pm_runtime_enable(&dev->dev);
3208 	device_enable_async_suspend(&dev->dev);
3209 	dev->wakeup_prepared = false;
3210 
3211 	dev->pm_cap = 0;
3212 	dev->pme_support = 0;
3213 
3214 	/* find PCI PM capability in list */
3215 	pm = pci_find_capability(dev, PCI_CAP_ID_PM);
3216 	if (!pm)
3217 		return;
3218 	/* Check device's ability to generate PME# */
3219 	pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);
3220 
3221 	if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
3222 		pci_err(dev, "unsupported PM cap regs version (%u)\n",
3223 			pmc & PCI_PM_CAP_VER_MASK);
3224 		return;
3225 	}
3226 
3227 	dev->pm_cap = pm;
3228 	dev->d3hot_delay = PCI_PM_D3HOT_WAIT;
3229 	dev->d3cold_delay = PCI_PM_D3COLD_WAIT;
3230 	dev->bridge_d3 = pci_bridge_d3_possible(dev);
3231 	dev->d3cold_allowed = true;
3232 
3233 	dev->d1_support = false;
3234 	dev->d2_support = false;
3235 	if (!pci_no_d1d2(dev)) {
3236 		if (pmc & PCI_PM_CAP_D1)
3237 			dev->d1_support = true;
3238 		if (pmc & PCI_PM_CAP_D2)
3239 			dev->d2_support = true;
3240 
3241 		if (dev->d1_support || dev->d2_support)
3242 			pci_info(dev, "supports%s%s\n",
3243 				   dev->d1_support ? " D1" : "",
3244 				   dev->d2_support ? " D2" : "");
3245 	}
3246 
3247 	pmc &= PCI_PM_CAP_PME_MASK;
3248 	if (pmc) {
3249 		pci_info(dev, "PME# supported from%s%s%s%s%s\n",
3250 			 (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
3251 			 (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
3252 			 (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
3253 			 (pmc & PCI_PM_CAP_PME_D3hot) ? " D3hot" : "",
3254 			 (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : "");
3255 		dev->pme_support = FIELD_GET(PCI_PM_CAP_PME_MASK, pmc);
3256 		dev->pme_poll = true;
3257 		/*
3258 		 * Make device's PM flags reflect the wake-up capability, but
3259 		 * let user space enable it to wake up the system as needed.
3260 		 */
3261 		device_set_wakeup_capable(&dev->dev, true);
3262 		/* Disable the PME# generation functionality */
3263 		pci_pme_active(dev, false);
3264 	}
3265 
3266 	pci_read_config_word(dev, PCI_STATUS, &status);
3267 	if (status & PCI_STATUS_IMM_READY)
3268 		dev->imm_ready = 1;
3269 }
3270 
3271 static unsigned long pci_ea_flags(struct pci_dev *dev, u8 prop)
3272 {
3273 	unsigned long flags = IORESOURCE_PCI_FIXED | IORESOURCE_PCI_EA_BEI;
3274 
3275 	switch (prop) {
3276 	case PCI_EA_P_MEM:
3277 	case PCI_EA_P_VF_MEM:
3278 		flags |= IORESOURCE_MEM;
3279 		break;
3280 	case PCI_EA_P_MEM_PREFETCH:
3281 	case PCI_EA_P_VF_MEM_PREFETCH:
3282 		flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH;
3283 		break;
3284 	case PCI_EA_P_IO:
3285 		flags |= IORESOURCE_IO;
3286 		break;
3287 	default:
3288 		return 0;
3289 	}
3290 
3291 	return flags;
3292 }
3293 
3294 static struct resource *pci_ea_get_resource(struct pci_dev *dev, u8 bei,
3295 					    u8 prop)
3296 {
3297 	if (bei <= PCI_EA_BEI_BAR5 && prop <= PCI_EA_P_IO)
3298 		return &dev->resource[bei];
3299 #ifdef CONFIG_PCI_IOV
3300 	else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5 &&
3301 		 (prop == PCI_EA_P_VF_MEM || prop == PCI_EA_P_VF_MEM_PREFETCH))
3302 		return &dev->resource[PCI_IOV_RESOURCES +
3303 				      bei - PCI_EA_BEI_VF_BAR0];
3304 #endif
3305 	else if (bei == PCI_EA_BEI_ROM)
3306 		return &dev->resource[PCI_ROM_RESOURCE];
3307 	else
3308 		return NULL;
3309 }
3310 
3311 /* Read an Enhanced Allocation (EA) entry */
3312 static int pci_ea_read(struct pci_dev *dev, int offset)
3313 {
3314 	struct resource *res;
3315 	const char *res_name;
3316 	int ent_size, ent_offset = offset;
3317 	resource_size_t start, end;
3318 	unsigned long flags;
3319 	u32 dw0, bei, base, max_offset;
3320 	u8 prop;
3321 	bool support_64 = (sizeof(resource_size_t) >= 8);
3322 
3323 	pci_read_config_dword(dev, ent_offset, &dw0);
3324 	ent_offset += 4;
3325 
3326 	/* Entry size field indicates DWORDs after 1st */
3327 	ent_size = (FIELD_GET(PCI_EA_ES, dw0) + 1) << 2;
3328 
3329 	if (!(dw0 & PCI_EA_ENABLE)) /* Entry not enabled */
3330 		goto out;
3331 
3332 	bei = FIELD_GET(PCI_EA_BEI, dw0);
3333 	prop = FIELD_GET(PCI_EA_PP, dw0);
3334 
3335 	/*
3336 	 * If the Property is in the reserved range, try the Secondary
3337 	 * Property instead.
3338 	 */
3339 	if (prop > PCI_EA_P_BRIDGE_IO && prop < PCI_EA_P_MEM_RESERVED)
3340 		prop = FIELD_GET(PCI_EA_SP, dw0);
3341 	if (prop > PCI_EA_P_BRIDGE_IO)
3342 		goto out;
3343 
3344 	res = pci_ea_get_resource(dev, bei, prop);
3345 	res_name = pci_resource_name(dev, bei);
3346 	if (!res) {
3347 		pci_err(dev, "Unsupported EA entry BEI: %u\n", bei);
3348 		goto out;
3349 	}
3350 
3351 	flags = pci_ea_flags(dev, prop);
3352 	if (!flags) {
3353 		pci_err(dev, "Unsupported EA properties: %#x\n", prop);
3354 		goto out;
3355 	}
3356 
3357 	/* Read Base */
3358 	pci_read_config_dword(dev, ent_offset, &base);
3359 	start = (base & PCI_EA_FIELD_MASK);
3360 	ent_offset += 4;
3361 
3362 	/* Read MaxOffset */
3363 	pci_read_config_dword(dev, ent_offset, &max_offset);
3364 	ent_offset += 4;
3365 
3366 	/* Read Base MSBs (if 64-bit entry) */
3367 	if (base & PCI_EA_IS_64) {
3368 		u32 base_upper;
3369 
3370 		pci_read_config_dword(dev, ent_offset, &base_upper);
3371 		ent_offset += 4;
3372 
3373 		flags |= IORESOURCE_MEM_64;
3374 
3375 		/* entry starts above 32-bit boundary, can't use */
3376 		if (!support_64 && base_upper)
3377 			goto out;
3378 
3379 		if (support_64)
3380 			start |= ((u64)base_upper << 32);
3381 	}
3382 
3383 	end = start + (max_offset | 0x03);
3384 
3385 	/* Read MaxOffset MSBs (if 64-bit entry) */
3386 	if (max_offset & PCI_EA_IS_64) {
3387 		u32 max_offset_upper;
3388 
3389 		pci_read_config_dword(dev, ent_offset, &max_offset_upper);
3390 		ent_offset += 4;
3391 
3392 		flags |= IORESOURCE_MEM_64;
3393 
3394 		/* entry too big, can't use */
3395 		if (!support_64 && max_offset_upper)
3396 			goto out;
3397 
3398 		if (support_64)
3399 			end += ((u64)max_offset_upper << 32);
3400 	}
3401 
3402 	if (end < start) {
3403 		pci_err(dev, "EA Entry crosses address boundary\n");
3404 		goto out;
3405 	}
3406 
3407 	if (ent_size != ent_offset - offset) {
3408 		pci_err(dev, "EA Entry Size (%d) does not match length read (%d)\n",
3409 			ent_size, ent_offset - offset);
3410 		goto out;
3411 	}
3412 
3413 	res->name = pci_name(dev);
3414 	res->start = start;
3415 	res->end = end;
3416 	res->flags = flags;
3417 
3418 	if (bei <= PCI_EA_BEI_BAR5 || bei == PCI_EA_BEI_ROM ||
3419 	    (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5))
3420 		pci_info(dev, "%s %pR: from Enhanced Allocation, properties %#02x\n",
3421 			 res_name, res, prop);
3422 	else
3423 		pci_info(dev, "BEI %d %pR: from Enhanced Allocation, properties %#02x\n",
3424 			 bei, res, prop);
3430 
3431 out:
3432 	return offset + ent_size;
3433 }
3434 
3435 /* Enhanced Allocation Initialization */
3436 void pci_ea_init(struct pci_dev *dev)
3437 {
3438 	int ea;
3439 	u8 num_ent;
3440 	int offset;
3441 	int i;
3442 
3443 	/* find PCI EA capability in list */
3444 	ea = pci_find_capability(dev, PCI_CAP_ID_EA);
3445 	if (!ea)
3446 		return;
3447 
3448 	/* determine the number of entries */
3449 	pci_bus_read_config_byte(dev->bus, dev->devfn, ea + PCI_EA_NUM_ENT,
3450 					&num_ent);
3451 	num_ent &= PCI_EA_NUM_ENT_MASK;
3452 
3453 	offset = ea + PCI_EA_FIRST_ENT;
3454 
3455 	/* Skip DWORD 2 for type 1 functions */
3456 	if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
3457 		offset += 4;
3458 
3459 	/* parse each EA entry */
3460 	for (i = 0; i < num_ent; ++i)
3461 		offset = pci_ea_read(dev, offset);
3462 }
3463 
3464 static void pci_add_saved_cap(struct pci_dev *pci_dev,
3465 	struct pci_cap_saved_state *new_cap)
3466 {
3467 	hlist_add_head(&new_cap->next, &pci_dev->saved_cap_space);
3468 }
3469 
3470 /**
3471  * _pci_add_cap_save_buffer - allocate buffer for saving given
3472  *			      capability registers
3473  * @dev: the PCI device
3474  * @cap: the capability to allocate the buffer for
3475  * @extended: Standard or Extended capability ID
3476  * @size: requested size of the buffer
3477  */
3478 static int _pci_add_cap_save_buffer(struct pci_dev *dev, u16 cap,
3479 				    bool extended, unsigned int size)
3480 {
3481 	int pos;
3482 	struct pci_cap_saved_state *save_state;
3483 
3484 	if (extended)
3485 		pos = pci_find_ext_capability(dev, cap);
3486 	else
3487 		pos = pci_find_capability(dev, cap);
3488 
3489 	if (!pos)
3490 		return 0;
3491 
3492 	save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL);
3493 	if (!save_state)
3494 		return -ENOMEM;
3495 
3496 	save_state->cap.cap_nr = cap;
3497 	save_state->cap.cap_extended = extended;
3498 	save_state->cap.size = size;
3499 	pci_add_saved_cap(dev, save_state);
3500 
3501 	return 0;
3502 }
3503 
3504 int pci_add_cap_save_buffer(struct pci_dev *dev, char cap, unsigned int size)
3505 {
3506 	return _pci_add_cap_save_buffer(dev, cap, false, size);
3507 }
3508 
3509 int pci_add_ext_cap_save_buffer(struct pci_dev *dev, u16 cap, unsigned int size)
3510 {
3511 	return _pci_add_cap_save_buffer(dev, cap, true, size);
3512 }
3513 
3514 /**
3515  * pci_allocate_cap_save_buffers - allocate buffers for saving capabilities
3516  * @dev: the PCI device
3517  */
3518 void pci_allocate_cap_save_buffers(struct pci_dev *dev)
3519 {
3520 	int error;
3521 
3522 	error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP,
3523 					PCI_EXP_SAVE_REGS * sizeof(u16));
3524 	if (error)
3525 		pci_err(dev, "unable to preallocate PCI Express save buffer\n");
3526 
3527 	error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16));
3528 	if (error)
3529 		pci_err(dev, "unable to preallocate PCI-X save buffer\n");
3530 
3531 	error = pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_LTR,
3532 					    2 * sizeof(u16));
3533 	if (error)
3534 		pci_err(dev, "unable to allocate suspend buffer for LTR\n");
3535 
3536 	pci_allocate_vc_save_buffers(dev);
3537 }
3538 
3539 void pci_free_cap_save_buffers(struct pci_dev *dev)
3540 {
3541 	struct pci_cap_saved_state *tmp;
3542 	struct hlist_node *n;
3543 
3544 	hlist_for_each_entry_safe(tmp, n, &dev->saved_cap_space, next)
3545 		kfree(tmp);
3546 }
3547 
3548 /**
3549  * pci_configure_ari - enable or disable ARI forwarding
3550  * @dev: the PCI device
3551  *
3552  * If @dev and its upstream bridge both support ARI, enable ARI in the
3553  * bridge.  Otherwise, disable ARI in the bridge.
3554  */
3555 void pci_configure_ari(struct pci_dev *dev)
3556 {
3557 	u32 cap;
3558 	struct pci_dev *bridge;
3559 
3560 	if (pcie_ari_disabled || !pci_is_pcie(dev) || dev->devfn)
3561 		return;
3562 
3563 	bridge = dev->bus->self;
3564 	if (!bridge)
3565 		return;
3566 
3567 	pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);
3568 	if (!(cap & PCI_EXP_DEVCAP2_ARI))
3569 		return;
3570 
3571 	if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI)) {
3572 		pcie_capability_set_word(bridge, PCI_EXP_DEVCTL2,
3573 					 PCI_EXP_DEVCTL2_ARI);
3574 		bridge->ari_enabled = 1;
3575 	} else {
3576 		pcie_capability_clear_word(bridge, PCI_EXP_DEVCTL2,
3577 					   PCI_EXP_DEVCTL2_ARI);
3578 		bridge->ari_enabled = 0;
3579 	}
3580 }
3581 
3582 static bool pci_acs_flags_enabled(struct pci_dev *pdev, u16 acs_flags)
3583 {
3584 	int pos;
3585 	u16 cap, ctrl;
3586 
3587 	pos = pdev->acs_cap;
3588 	if (!pos)
3589 		return false;
3590 
3591 	/*
3592 	 * Except for egress control, capabilities are either required
3593 	 * or only required if controllable.  Features missing from the
3594 	 * capability field can therefore be assumed to be hard-wired enabled.
3595 	 */
3596 	pci_read_config_word(pdev, pos + PCI_ACS_CAP, &cap);
3597 	acs_flags &= (cap | PCI_ACS_EC);
3598 
3599 	pci_read_config_word(pdev, pos + PCI_ACS_CTRL, &ctrl);
3600 	return (ctrl & acs_flags) == acs_flags;
3601 }
3602 
3603 /**
3604  * pci_acs_enabled - test ACS against required flags for a given device
3605  * @pdev: device to test
3606  * @acs_flags: required PCI ACS flags
3607  *
3608  * Return true if the device supports the provided flags.  Automatically
3609  * filters out flags that are not implemented on multifunction devices.
3610  *
3611  * Note that this interface checks the effective ACS capabilities of the
3612  * device rather than the actual capabilities.  For instance, most single
3613  * function endpoints are not required to support ACS because they have no
3614  * opportunity for peer-to-peer access.  We therefore return 'true'
3615  * regardless of whether the device exposes an ACS capability.  This makes
3616  * it much easier for callers of this function to ignore the actual type
3617  * or topology of the device when testing ACS support.
3618  */
3619 bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags)
3620 {
3621 	int ret;
3622 
3623 	ret = pci_dev_specific_acs_enabled(pdev, acs_flags);
3624 	if (ret >= 0)
3625 		return ret > 0;
3626 
3627 	/*
3628 	 * Conventional PCI and PCI-X devices never support ACS, either
3629 	 * effectively or actually.  The shared bus topology implies that
3630 	 * any device on the bus can receive or snoop DMA.
3631 	 */
3632 	if (!pci_is_pcie(pdev))
3633 		return false;
3634 
3635 	switch (pci_pcie_type(pdev)) {
3636 	/*
3637 	 * PCI/X-to-PCIe bridges are not specifically mentioned by the spec,
3638 	 * but since their primary interface is PCI/X, we conservatively
3639 	 * handle them as we would a non-PCIe device.
3640 	 */
3641 	case PCI_EXP_TYPE_PCIE_BRIDGE:
3642 	/*
3643 	 * PCIe 3.0, 6.12.1 excludes ACS on these devices.  "ACS is never
3644 	 * applicable... must never implement an ACS Extended Capability...".
3645 	 * This seems arbitrary, but we take a conservative interpretation
3646 	 * of this statement.
3647 	 */
3648 	case PCI_EXP_TYPE_PCI_BRIDGE:
3649 	case PCI_EXP_TYPE_RC_EC:
3650 		return false;
3651 	/*
3652 	 * PCIe 3.0, 6.12.1.1 specifies that downstream and root ports should
3653 	 * implement ACS in order to indicate their peer-to-peer capabilities,
3654 	 * regardless of whether they are single- or multi-function devices.
3655 	 */
3656 	case PCI_EXP_TYPE_DOWNSTREAM:
3657 	case PCI_EXP_TYPE_ROOT_PORT:
3658 		return pci_acs_flags_enabled(pdev, acs_flags);
3659 	/*
3660 	 * PCIe 3.0, 6.12.1.2 specifies ACS capabilities that should be
3661 	 * implemented by the remaining PCIe types to indicate peer-to-peer
3662 	 * capabilities, but only when they are part of a multifunction
3663 	 * device.  The footnote for section 6.12 indicates the specific
3664 	 * PCIe types included here.
3665 	 */
3666 	case PCI_EXP_TYPE_ENDPOINT:
3667 	case PCI_EXP_TYPE_UPSTREAM:
3668 	case PCI_EXP_TYPE_LEG_END:
3669 	case PCI_EXP_TYPE_RC_END:
3670 		if (!pdev->multifunction)
3671 			break;
3672 
3673 		return pci_acs_flags_enabled(pdev, acs_flags);
3674 	}
3675 
3676 	/*
3677 	 * PCIe 3.0, 6.12.1.3 specifies no ACS capabilities are applicable
3678 	 * to single function devices with the exception of downstream ports.
3679 	 */
3680 	return true;
3681 }
3682 
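/*
 * Example (illustrative sketch, not code from this file): a caller such
 * as the IOMMU grouping logic might test whether a hypothetical "pdev"
 * isolates peer-to-peer traffic using the usual redirect/validation
 * flags:
 *
 *	u16 acs_flags = PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF;
 *	bool isolated = pci_acs_enabled(pdev, acs_flags);
 */
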
3683 /**
3684  * pci_acs_path_enabled - test ACS flags from start to end in a hierarchy
3685  * @start: starting downstream device
3686  * @end: ending upstream device or NULL to search to the root bus
3687  * @acs_flags: required flags
3688  *
3689  * Walk up a device tree from start to end testing PCI ACS support.  If
3690  * any step along the way does not support the required flags, return false.
3691  */
3692 bool pci_acs_path_enabled(struct pci_dev *start,
3693 			  struct pci_dev *end, u16 acs_flags)
3694 {
3695 	struct pci_dev *pdev, *parent = start;
3696 
3697 	do {
3698 		pdev = parent;
3699 
3700 		if (!pci_acs_enabled(pdev, acs_flags))
3701 			return false;
3702 
3703 		if (pci_is_root_bus(pdev->bus))
3704 			return (end == NULL);
3705 
3706 		parent = pdev->bus->self;
3707 	} while (pdev != end);
3708 
3709 	return true;
3710 }
3711 
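/*
 * Example (illustrative sketch): verify that every bridge between a
 * hypothetical endpoint "pdev" and the root bus redirects and validates
 * peer-to-peer requests, e.g. before concluding that the device can sit
 * in its own IOMMU group:
 *
 *	bool isolated = pci_acs_path_enabled(pdev, NULL,
 *				PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
 */
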
3712 /**
3713  * pci_acs_init - Initialize ACS if hardware supports it
3714  * @dev: the PCI device
3715  */
3716 void pci_acs_init(struct pci_dev *dev)
3717 {
3718 	dev->acs_cap = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
3719 
3720 	/*
3721 	 * Attempt to enable ACS regardless of capability because some Root
3722 	 * Ports (e.g. those quirked with *_intel_pch_acs_*) do not have
3723 	 * the standard ACS capability but still support ACS via those
3724 	 * quirks.
3725 	 */
3726 	pci_enable_acs(dev);
3727 }
3728 
3729 void pci_rebar_init(struct pci_dev *pdev)
3730 {
3731 	pdev->rebar_cap = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_REBAR);
3732 }
3733 
3734 /**
3735  * pci_rebar_find_pos - find position of resize ctrl reg for BAR
3736  * @pdev: PCI device
3737  * @bar: BAR to find
3738  *
3739  * Helper to find the position of the ctrl register for a BAR.
3740  * Returns -ENOTSUPP if resizable BARs are not supported at all.
3741  * Returns -ENOENT if no ctrl register for the BAR could be found.
3742  */
3743 static int pci_rebar_find_pos(struct pci_dev *pdev, int bar)
3744 {
3745 	unsigned int pos, nbars, i;
3746 	u32 ctrl;
3747 
3748 	pos = pdev->rebar_cap;
3749 	if (!pos)
3750 		return -ENOTSUPP;
3751 
3752 	pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3753 	nbars = FIELD_GET(PCI_REBAR_CTRL_NBAR_MASK, ctrl);
3754 
3755 	for (i = 0; i < nbars; i++, pos += 8) {
3756 		int bar_idx;
3757 
3758 		pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3759 		bar_idx = FIELD_GET(PCI_REBAR_CTRL_BAR_IDX, ctrl);
3760 		if (bar_idx == bar)
3761 			return pos;
3762 	}
3763 
3764 	return -ENOENT;
3765 }
3766 
3767 /**
3768  * pci_rebar_get_possible_sizes - get possible sizes for BAR
3769  * @pdev: PCI device
3770  * @bar: BAR to query
3771  *
3772  * Get the possible sizes of a resizable BAR as a bitmask defined in the
3773  * spec (bit 0=1MB, bit 31=128TB). Returns 0 if the BAR isn't resizable.
3774  */
3775 u32 pci_rebar_get_possible_sizes(struct pci_dev *pdev, int bar)
3776 {
3777 	int pos;
3778 	u32 cap;
3779 
3780 	pos = pci_rebar_find_pos(pdev, bar);
3781 	if (pos < 0)
3782 		return 0;
3783 
3784 	pci_read_config_dword(pdev, pos + PCI_REBAR_CAP, &cap);
3785 	cap = FIELD_GET(PCI_REBAR_CAP_SIZES, cap);
3786 
3787 	/* Sapphire RX 5600 XT Pulse has an invalid cap dword for BAR 0 */
3788 	if (pdev->vendor == PCI_VENDOR_ID_ATI && pdev->device == 0x731f &&
3789 	    bar == 0 && cap == 0x700)
3790 		return 0x3f00;
3791 
3792 	return cap;
3793 }
3794 EXPORT_SYMBOL(pci_rebar_get_possible_sizes);
3795 
3796 /**
3797  * pci_rebar_get_current_size - get the current size of a BAR
3798  * @pdev: PCI device
3799  * @bar: BAR to get the size of
3800  *
3801  * Read the size of a BAR from the resizable BAR config.
3802  * Returns size if found or negative error code.
3803  */
3804 int pci_rebar_get_current_size(struct pci_dev *pdev, int bar)
3805 {
3806 	int pos;
3807 	u32 ctrl;
3808 
3809 	pos = pci_rebar_find_pos(pdev, bar);
3810 	if (pos < 0)
3811 		return pos;
3812 
3813 	pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3814 	return FIELD_GET(PCI_REBAR_CTRL_BAR_SIZE, ctrl);
3815 }
3816 
3817 /**
3818  * pci_rebar_set_size - set a new size for a BAR
3819  * @pdev: PCI device
3820  * @bar: BAR to set size to
3821  * @size: new size as defined in the spec (0=1MB, 31=128TB)
3822  *
3823  * Set the new size of a BAR as defined in the spec.
3824  * Returns zero if resizing was successful, error code otherwise.
3825  */
3826 int pci_rebar_set_size(struct pci_dev *pdev, int bar, int size)
3827 {
3828 	int pos;
3829 	u32 ctrl;
3830 
3831 	pos = pci_rebar_find_pos(pdev, bar);
3832 	if (pos < 0)
3833 		return pos;
3834 
3835 	pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3836 	ctrl &= ~PCI_REBAR_CTRL_BAR_SIZE;
3837 	ctrl |= FIELD_PREP(PCI_REBAR_CTRL_BAR_SIZE, size);
3838 	pci_write_config_dword(pdev, pos + PCI_REBAR_CTRL, ctrl);
3839 	return 0;
3840 }
3841 
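/*
 * Example (illustrative sketch): the typical resize flow used by GPU
 * drivers queries the supported sizes and programs the largest one.
 * Only the two calls below come from this file; releasing and
 * reassigning the BAR around the resize is driver-specific and omitted.
 * "pdev" and the use of BAR 0 are hypothetical.
 *
 *	u32 sizes = pci_rebar_get_possible_sizes(pdev, 0);
 *
 *	if (sizes)
 *		pci_rebar_set_size(pdev, 0, __fls(sizes));
 */
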
3842 /**
3843  * pci_enable_atomic_ops_to_root - enable AtomicOp requests to root port
3844  * @dev: the PCI device
3845  * @cap_mask: mask of desired AtomicOp sizes, including one or more of:
3846  *	PCI_EXP_DEVCAP2_ATOMIC_COMP32
3847  *	PCI_EXP_DEVCAP2_ATOMIC_COMP64
3848  *	PCI_EXP_DEVCAP2_ATOMIC_COMP128
3849  *
3850  * Return 0 if all upstream bridges support AtomicOp routing, egress
3851  * blocking is disabled on all upstream ports, and the root port supports
3852  * the requested completion capabilities (32-bit, 64-bit and/or 128-bit
3853  * AtomicOp completion), or negative otherwise.
3854  */
3855 int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask)
3856 {
3857 	struct pci_bus *bus = dev->bus;
3858 	struct pci_dev *bridge;
3859 	u32 cap, ctl2;
3860 
3861 	/*
3862 	 * Per PCIe r5.0, sec 9.3.5.10, the AtomicOp Requester Enable bit
3863 	 * in Device Control 2 is reserved in VFs and the PF value applies
3864 	 * to all associated VFs.
3865 	 */
3866 	if (dev->is_virtfn)
3867 		return -EINVAL;
3868 
3869 	if (!pci_is_pcie(dev))
3870 		return -EINVAL;
3871 
3872 	/*
3873 	 * Per PCIe r4.0, sec 6.15, endpoints and root ports may be
3874 	 * AtomicOp requesters.  For now, we only support endpoints as
3875 	 * requesters and root ports as completers.  No endpoints as
3876 	 * completers, and no peer-to-peer.
3877 	 */
3878 
3879 	switch (pci_pcie_type(dev)) {
3880 	case PCI_EXP_TYPE_ENDPOINT:
3881 	case PCI_EXP_TYPE_LEG_END:
3882 	case PCI_EXP_TYPE_RC_END:
3883 		break;
3884 	default:
3885 		return -EINVAL;
3886 	}
3887 
3888 	while (bus->parent) {
3889 		bridge = bus->self;
3890 
3891 		pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);
3892 
3893 		switch (pci_pcie_type(bridge)) {
3894 		/* Ensure switch ports support AtomicOp routing */
3895 		case PCI_EXP_TYPE_UPSTREAM:
3896 		case PCI_EXP_TYPE_DOWNSTREAM:
3897 			if (!(cap & PCI_EXP_DEVCAP2_ATOMIC_ROUTE))
3898 				return -EINVAL;
3899 			break;
3900 
3901 		/* Ensure root port supports all the sizes we care about */
3902 		case PCI_EXP_TYPE_ROOT_PORT:
3903 			if ((cap & cap_mask) != cap_mask)
3904 				return -EINVAL;
3905 			break;
3906 		}
3907 
3908 		/* Ensure upstream ports don't block AtomicOps on egress */
3909 		if (pci_pcie_type(bridge) == PCI_EXP_TYPE_UPSTREAM) {
3910 			pcie_capability_read_dword(bridge, PCI_EXP_DEVCTL2,
3911 						   &ctl2);
3912 			if (ctl2 & PCI_EXP_DEVCTL2_ATOMIC_EGRESS_BLOCK)
3913 				return -EINVAL;
3914 		}
3915 
3916 		bus = bus->parent;
3917 	}
3918 
3919 	pcie_capability_set_word(dev, PCI_EXP_DEVCTL2,
3920 				 PCI_EXP_DEVCTL2_ATOMIC_REQ);
3921 	return 0;
3922 }
3923 EXPORT_SYMBOL(pci_enable_atomic_ops_to_root);
3924 
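/*
 * Example (illustrative sketch): a driver that wants to issue 64-bit
 * AtomicOps to host memory would typically do this once at probe time
 * and fall back to a non-atomic path on failure; "pdev" is hypothetical.
 *
 *	if (pci_enable_atomic_ops_to_root(pdev,
 *					  PCI_EXP_DEVCAP2_ATOMIC_COMP64))
 *		pci_info(pdev, "64-bit AtomicOps not available\n");
 */
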
3925 /**
3926  * pci_release_region - Release a PCI BAR
3927  * @pdev: PCI device whose resources were previously reserved by
3928  *	  pci_request_region()
3929  * @bar: BAR to release
3930  *
3931  * Releases the PCI I/O and memory resources previously reserved by a
3932  * successful call to pci_request_region().  Call this function only
3933  * after all use of the PCI regions has ceased.
3934  */
3935 void pci_release_region(struct pci_dev *pdev, int bar)
3936 {
3937 	if (!pci_bar_index_is_valid(bar))
3938 		return;
3939 
3940 	/*
3941 	 * This is done for backwards compatibility, because the old PCI devres
3942 	 * API had a mode in which the function became managed if it had been
3943 	 * enabled with pcim_enable_device() instead of pci_enable_device().
3944 	 */
3945 	if (pci_is_managed(pdev)) {
3946 		pcim_release_region(pdev, bar);
3947 		return;
3948 	}
3949 
3950 	if (pci_resource_len(pdev, bar) == 0)
3951 		return;
3952 	if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
3953 		release_region(pci_resource_start(pdev, bar),
3954 				pci_resource_len(pdev, bar));
3955 	else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
3956 		release_mem_region(pci_resource_start(pdev, bar),
3957 				pci_resource_len(pdev, bar));
3958 }
3959 EXPORT_SYMBOL(pci_release_region);
3960 
3961 /**
3962  * __pci_request_region - Reserve PCI I/O and memory resource
3963  * @pdev: PCI device whose resources are to be reserved
3964  * @bar: BAR to be reserved
3965  * @name: name of the driver requesting the resource
3966  * @exclusive: whether the region access is exclusive or not
3967  *
3968  * Returns: 0 on success, negative error code on failure.
3969  *
3970  * Mark the PCI region associated with PCI device @pdev BAR @bar as being
3971  * reserved by owner @name. Do not access any address inside the PCI regions
3972  * unless this call returns successfully.
3973  *
3974  * If @exclusive is set, then the region is marked so that userspace
3975  * is explicitly not allowed to map the resource via /dev/mem or
3976  * sysfs MMIO access.
3977  *
3978  * A warning message is also printed on failure to reserve the
3979  * region.
3980  */
3981 static int __pci_request_region(struct pci_dev *pdev, int bar,
3982 				const char *name, int exclusive)
3983 {
3984 	if (!pci_bar_index_is_valid(bar))
3985 		return -EINVAL;
3986 
3987 	if (pci_is_managed(pdev)) {
3988 		if (exclusive == IORESOURCE_EXCLUSIVE)
3989 			return pcim_request_region_exclusive(pdev, bar, name);
3990 
3991 		return pcim_request_region(pdev, bar, name);
3992 	}
3993 
3994 	if (pci_resource_len(pdev, bar) == 0)
3995 		return 0;
3996 
3997 	if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
3998 		if (!request_region(pci_resource_start(pdev, bar),
3999 			    pci_resource_len(pdev, bar), name))
4000 			goto err_out;
4001 	} else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
4002 		if (!__request_mem_region(pci_resource_start(pdev, bar),
4003 					pci_resource_len(pdev, bar), name,
4004 					exclusive))
4005 			goto err_out;
4006 	}
4007 
4008 	return 0;
4009 
4010 err_out:
4011 	pci_warn(pdev, "BAR %d: can't reserve %pR\n", bar,
4012 		 &pdev->resource[bar]);
4013 	return -EBUSY;
4014 }
4015 
4016 /**
4017  * pci_request_region - Reserve PCI I/O and memory resource
4018  * @pdev: PCI device whose resources are to be reserved
4019  * @bar: BAR to be reserved
4020  * @name: name of the driver requesting the resource
4021  *
4022  * Returns: 0 on success, negative error code on failure.
4023  *
4024  * Mark the PCI region associated with PCI device @pdev BAR @bar as being
4025  * reserved by owner @name. Do not access any address inside the PCI regions
4026  * unless this call returns successfully.
4027  *
4028  * A warning message is also printed on failure to reserve the
4029  * region.
4030  *
4031  * NOTE:
4032  * This is a "hybrid" function: It's normally unmanaged, but becomes managed
4033  * when pcim_enable_device() has been called in advance. This hybrid feature is
4034  * DEPRECATED! If you want managed cleanup, use the pcim_* functions instead.
4035  */
4036 int pci_request_region(struct pci_dev *pdev, int bar, const char *name)
4037 {
4038 	return __pci_request_region(pdev, bar, name, 0);
4039 }
4040 EXPORT_SYMBOL(pci_request_region);
4041 
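/*
 * Example (illustrative sketch): reserve and map a single BAR, undoing
 * the reservation on the error path.  The driver name and BAR index are
 * hypothetical; managed drivers should prefer the pcim_* variants.
 *
 *	void __iomem *regs;
 *
 *	if (pci_request_region(pdev, 0, "mydrv"))
 *		return -EBUSY;
 *	regs = pci_iomap(pdev, 0, 0);
 *	if (!regs) {
 *		pci_release_region(pdev, 0);
 *		return -ENOMEM;
 *	}
 */
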
4042 /**
4043  * pci_release_selected_regions - Release selected PCI I/O and memory resources
4044  * @pdev: PCI device whose resources were previously reserved
4045  * @bars: Bitmask of BARs to be released
4046  *
4047  * Release selected PCI I/O and memory resources previously reserved.
4048  * Call this function only after all use of the PCI regions has ceased.
4049  */
4050 void pci_release_selected_regions(struct pci_dev *pdev, int bars)
4051 {
4052 	int i;
4053 
4054 	for (i = 0; i < PCI_STD_NUM_BARS; i++)
4055 		if (bars & (1 << i))
4056 			pci_release_region(pdev, i);
4057 }
4058 EXPORT_SYMBOL(pci_release_selected_regions);
4059 
4060 static int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
4061 					  const char *name, int excl)
4062 {
4063 	int i;
4064 
4065 	for (i = 0; i < PCI_STD_NUM_BARS; i++)
4066 		if (bars & (1 << i))
4067 			if (__pci_request_region(pdev, i, name, excl))
4068 				goto err_out;
4069 	return 0;
4070 
4071 err_out:
4072 	while (--i >= 0)
4073 		if (bars & (1 << i))
4074 			pci_release_region(pdev, i);
4075 
4076 	return -EBUSY;
4077 }
4078
4080 /**
4081  * pci_request_selected_regions - Reserve selected PCI I/O and memory resources
4082  * @pdev: PCI device whose resources are to be reserved
4083  * @bars: Bitmask of BARs to be requested
4084  * @name: Name of the driver requesting the resources
4085  *
4086  * Returns: 0 on success, negative error code on failure.
4087  *
4088  * NOTE:
4089  * This is a "hybrid" function: It's normally unmanaged, but becomes managed
4090  * when pcim_enable_device() has been called in advance. This hybrid feature is
4091  * DEPRECATED! If you want managed cleanup, use the pcim_* functions instead.
4092  */
4093 int pci_request_selected_regions(struct pci_dev *pdev, int bars,
4094 				 const char *name)
4095 {
4096 	return __pci_request_selected_regions(pdev, bars, name, 0);
4097 }
4098 EXPORT_SYMBOL(pci_request_selected_regions);
4099 
4100 /**
4101  * pci_request_selected_regions_exclusive - Request regions exclusively
4102  * @pdev: PCI device to request regions from
4103  * @bars: bit mask of BARs to request
4104  * @name: name of the driver requesting the resources
4105  *
4106  * Returns: 0 on success, negative error code on failure.
4107  *
4108  * NOTE:
4109  * This is a "hybrid" function: It's normally unmanaged, but becomes managed
4110  * when pcim_enable_device() has been called in advance. This hybrid feature is
4111  * DEPRECATED! If you want managed cleanup, use the pcim_* functions instead.
4112  */
4113 int pci_request_selected_regions_exclusive(struct pci_dev *pdev, int bars,
4114 					   const char *name)
4115 {
4116 	return __pci_request_selected_regions(pdev, bars, name,
4117 			IORESOURCE_EXCLUSIVE);
4118 }
4119 EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
4120 
4121 /**
4122  * pci_release_regions - Release reserved PCI I/O and memory resources
4123  * @pdev: PCI device whose resources were previously reserved by
4124  *	  pci_request_regions()
4125  *
4126  * Releases all PCI I/O and memory resources previously reserved by a
4127  * successful call to pci_request_regions().  Call this function only
4128  * after all use of the PCI regions has ceased.
4129  */
4130 void pci_release_regions(struct pci_dev *pdev)
4131 {
4132 	pci_release_selected_regions(pdev, (1 << PCI_STD_NUM_BARS) - 1);
4133 }
4134 EXPORT_SYMBOL(pci_release_regions);
4135 
4136 /**
4137  * pci_request_regions - Reserve PCI I/O and memory resources
4138  * @pdev: PCI device whose resources are to be reserved
4139  * @name: name of the driver requesting the resources
4140  *
4141  * Mark all PCI regions associated with PCI device @pdev as being reserved by
4142  * owner @name. Do not access any address inside the PCI regions unless this
4143  * call returns successfully.
4144  *
4145  * Returns 0 on success, or %EBUSY on error.  A warning
4146  * message is also printed on failure.
4147  *
4148  * NOTE:
4149  * This is a "hybrid" function: It's normally unmanaged, but becomes managed
4150  * when pcim_enable_device() has been called in advance. This hybrid feature is
4151  * DEPRECATED! If you want managed cleanup, use the pcim_* functions instead.
4152  */
4153 int pci_request_regions(struct pci_dev *pdev, const char *name)
4154 {
4155 	return pci_request_selected_regions(pdev,
4156 			((1 << PCI_STD_NUM_BARS) - 1), name);
4157 }
4158 EXPORT_SYMBOL(pci_request_regions);
4159 
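/*
 * Example (illustrative sketch): the common pairing, with a hypothetical
 * driver name - pci_request_regions() in probe(), pci_release_regions()
 * in remove():
 *
 *	err = pci_request_regions(pdev, "mydrv");
 *	if (err)
 *		return err;
 *	...
 *	pci_release_regions(pdev);
 */
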
4160 /**
4161  * pci_request_regions_exclusive - Reserve PCI I/O and memory resources
4162  * @pdev: PCI device whose resources are to be reserved
4163  * @name: name of the driver requesting the resources
4164  *
4165  * Returns: 0 on success, negative error code on failure.
4166  *
4167  * Mark all PCI regions associated with PCI device @pdev as being reserved
4168  * by owner @name. Do not access any address inside the PCI regions
4169  * unless this call returns successfully.
4170  *
4171  * pci_request_regions_exclusive() will mark the region so that /dev/mem
4172  * and the sysfs MMIO access will not be allowed.
4173  *
4174  * A warning message is also printed on failure to reserve the
4175  * regions.
4176  *
4177  * NOTE:
4178  * This is a "hybrid" function: It's normally unmanaged, but becomes managed
4179  * when pcim_enable_device() has been called in advance. This hybrid feature is
4180  * DEPRECATED! If you want managed cleanup, use the pcim_* functions instead.
4181  */
4182 int pci_request_regions_exclusive(struct pci_dev *pdev, const char *name)
4183 {
4184 	return pci_request_selected_regions_exclusive(pdev,
4185 				((1 << PCI_STD_NUM_BARS) - 1), name);
4186 }
4187 EXPORT_SYMBOL(pci_request_regions_exclusive);
4188 
4189 /*
4190  * Record the PCI IO range (expressed as CPU physical address + size).
4191  * Return a negative value if an error has occurred, zero otherwise
4192  */
4193 int pci_register_io_range(const struct fwnode_handle *fwnode, phys_addr_t addr,
4194 			resource_size_t	size)
4195 {
4196 	int ret = 0;
4197 #ifdef PCI_IOBASE
4198 	struct logic_pio_hwaddr *range;
4199 
4200 	if (!size || addr + size < addr)
4201 		return -EINVAL;
4202 
4203 	range = kzalloc(sizeof(*range), GFP_ATOMIC);
4204 	if (!range)
4205 		return -ENOMEM;
4206 
4207 	range->fwnode = fwnode;
4208 	range->size = size;
4209 	range->hw_start = addr;
4210 	range->flags = LOGIC_PIO_CPU_MMIO;
4211 
4212 	ret = logic_pio_register_range(range);
4213 	if (ret)
4214 		kfree(range);
4215 
4216 	/* Ignore duplicates due to deferred probing */
4217 	if (ret == -EEXIST)
4218 		ret = 0;
4219 #endif
4220 
4221 	return ret;
4222 }
4223 
4224 phys_addr_t pci_pio_to_address(unsigned long pio)
4225 {
4226 #ifdef PCI_IOBASE
4227 	if (pio < MMIO_UPPER_LIMIT)
4228 		return logic_pio_to_hwaddr(pio);
4229 #endif
4230 
4231 	return (phys_addr_t) OF_BAD_ADDR;
4232 }
4233 EXPORT_SYMBOL_GPL(pci_pio_to_address);
4234 
4235 unsigned long __weak pci_address_to_pio(phys_addr_t address)
4236 {
4237 #ifdef PCI_IOBASE
4238 	return logic_pio_trans_cpuaddr(address);
4239 #else
4240 	if (address > IO_SPACE_LIMIT)
4241 		return (unsigned long)-1;
4242 
4243 	return (unsigned long) address;
4244 #endif
4245 }
4246 
4247 /**
4248  * pci_remap_iospace - Remap the memory mapped I/O space
4249  * @res: Resource describing the I/O space
4250  * @phys_addr: physical address of range to be mapped
4251  *
4252  * Remap the memory mapped I/O space described by the @res and the CPU
4253  * physical address @phys_addr into virtual address space.  Only
4254  * architectures that have memory mapped IO functions defined (and the
4255  * PCI_IOBASE value defined) should call this function.
4256  */
4257 #ifndef pci_remap_iospace
4258 int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
4259 {
4260 #if defined(PCI_IOBASE) && defined(CONFIG_MMU)
4261 	unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
4262 
4263 	if (!(res->flags & IORESOURCE_IO))
4264 		return -EINVAL;
4265 
4266 	if (res->end > IO_SPACE_LIMIT)
4267 		return -EINVAL;
4268 
4269 	return vmap_page_range(vaddr, vaddr + resource_size(res), phys_addr,
4270 			       pgprot_device(PAGE_KERNEL));
4271 #else
4272 	/*
4273 	 * This architecture does not have memory mapped I/O space,
4274 	 * so this function should never be called
4275 	 */
4276 	WARN_ONCE(1, "This architecture does not support memory mapped I/O\n");
4277 	return -ENODEV;
4278 #endif
4279 }
4280 EXPORT_SYMBOL(pci_remap_iospace);
4281 #endif
4282 
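/*
 * Example (illustrative sketch): a host bridge driver mapping a 64K I/O
 * window whose CPU physical address is a hypothetical "io_phys".  Note
 * that the resource describes logical I/O port space, not CPU addresses.
 *
 *	struct resource io_res = DEFINE_RES_IO_NAMED(0, SZ_64K, "PCI IO");
 *
 *	err = pci_remap_iospace(&io_res, io_phys);
 */
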
4283 /**
4284  * pci_unmap_iospace - Unmap the memory mapped I/O space
4285  * @res: resource to be unmapped
4286  *
4287  * Unmap the CPU virtual address @res from virtual address space.  Only
4288  * architectures that have memory mapped IO functions defined (and the
4289  * PCI_IOBASE value defined) should call this function.
4290  */
4291 void pci_unmap_iospace(struct resource *res)
4292 {
4293 #if defined(PCI_IOBASE) && defined(CONFIG_MMU)
4294 	unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
4295 
4296 	vunmap_range(vaddr, vaddr + resource_size(res));
4297 #endif
4298 }
4299 EXPORT_SYMBOL(pci_unmap_iospace);
4300 
4301 static void __pci_set_master(struct pci_dev *dev, bool enable)
4302 {
4303 	u16 old_cmd, cmd;
4304 
4305 	pci_read_config_word(dev, PCI_COMMAND, &old_cmd);
4306 	if (enable)
4307 		cmd = old_cmd | PCI_COMMAND_MASTER;
4308 	else
4309 		cmd = old_cmd & ~PCI_COMMAND_MASTER;
4310 	if (cmd != old_cmd) {
4311 		pci_dbg(dev, "%s bus mastering\n",
4312 			enable ? "enabling" : "disabling");
4313 		pci_write_config_word(dev, PCI_COMMAND, cmd);
4314 	}
4315 	dev->is_busmaster = enable;
4316 }
4317 
4318 /**
4319  * pcibios_setup - process "pci=" kernel boot arguments
4320  * @str: string used to pass in "pci=" kernel boot arguments
4321  *
4322  * Process kernel boot arguments.  This is the default implementation.
4323  * Architecture specific implementations can override this as necessary.
4324  */
4325 char * __weak __init pcibios_setup(char *str)
4326 {
4327 	return str;
4328 }
4329 
4330 /**
4331  * pcibios_set_master - enable PCI bus-mastering for device dev
4332  * @dev: the PCI device to enable
4333  *
4334  * Enables PCI bus-mastering for the device.  This is the default
4335  * implementation.  Architecture specific implementations can override
4336  * this if necessary.
4337  */
4338 void __weak pcibios_set_master(struct pci_dev *dev)
4339 {
4340 	u8 lat;
4341 
4342 	/* The latency timer doesn't apply to PCIe (either Type 0 or Type 1) */
4343 	if (pci_is_pcie(dev))
4344 		return;
4345 
4346 	pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
4347 	if (lat < 16)
4348 		lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency;
4349 	else if (lat > pcibios_max_latency)
4350 		lat = pcibios_max_latency;
4351 	else
4352 		return;
4353 
4354 	pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
4355 }
4356 
4357 /**
4358  * pci_set_master - enables bus-mastering for device dev
4359  * @dev: the PCI device to enable
4360  *
4361  * Enables bus-mastering on the device and calls pcibios_set_master()
4362  * to do the needed arch specific settings.
4363  */
4364 void pci_set_master(struct pci_dev *dev)
4365 {
4366 	__pci_set_master(dev, true);
4367 	pcibios_set_master(dev);
4368 }
4369 EXPORT_SYMBOL(pci_set_master);
4370 
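/*
 * Example (illustrative sketch): the canonical enable sequence in a
 * driver probe() routine, before any DMA is set up:
 *
 *	err = pci_enable_device(pdev);
 *	if (err)
 *		return err;
 *	pci_set_master(pdev);
 */
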
4371 /**
4372  * pci_clear_master - disables bus-mastering for device dev
4373  * @dev: the PCI device to disable
4374  */
4375 void pci_clear_master(struct pci_dev *dev)
4376 {
4377 	__pci_set_master(dev, false);
4378 }
4379 EXPORT_SYMBOL(pci_clear_master);
4380 
4381 /**
4382  * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed
4383  * @dev: the PCI device for which MWI is to be enabled
4384  *
4385  * Helper function for pci_set_mwi.
4386  * Originally copied from drivers/net/acenic.c.
4387  * Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>.
4388  *
4389  * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
4390  */
4391 int pci_set_cacheline_size(struct pci_dev *dev)
4392 {
4393 	u8 cacheline_size;
4394 
4395 	if (!pci_cache_line_size)
4396 		return -EINVAL;
4397 
4398 	/* Validate current setting: the PCI_CACHE_LINE_SIZE must be
4399 	   equal to or a multiple of the right value. */
4400 	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
4401 	if (cacheline_size >= pci_cache_line_size &&
4402 	    (cacheline_size % pci_cache_line_size) == 0)
4403 		return 0;
4404 
4405 	/* Write the correct value. */
4406 	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
4407 	/* Read it back. */
4408 	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
4409 	if (cacheline_size == pci_cache_line_size)
4410 		return 0;
4411 
4412 	pci_dbg(dev, "cache line size of %d is not supported\n",
4413 		   pci_cache_line_size << 2);
4414 
4415 	return -EINVAL;
4416 }
4417 EXPORT_SYMBOL_GPL(pci_set_cacheline_size);
4418 
4419 /**
4420  * pci_set_mwi - enables memory-write-invalidate PCI transaction
4421  * @dev: the PCI device for which MWI is enabled
4422  *
4423  * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
4424  *
4425  * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
4426  */
4427 int pci_set_mwi(struct pci_dev *dev)
4428 {
4429 #ifdef PCI_DISABLE_MWI
4430 	return 0;
4431 #else
4432 	int rc;
4433 	u16 cmd;
4434 
4435 	rc = pci_set_cacheline_size(dev);
4436 	if (rc)
4437 		return rc;
4438 
4439 	pci_read_config_word(dev, PCI_COMMAND, &cmd);
4440 	if (!(cmd & PCI_COMMAND_INVALIDATE)) {
4441 		pci_dbg(dev, "enabling Mem-Wr-Inval\n");
4442 		cmd |= PCI_COMMAND_INVALIDATE;
4443 		pci_write_config_word(dev, PCI_COMMAND, cmd);
4444 	}
4445 	return 0;
4446 #endif
4447 }
4448 EXPORT_SYMBOL(pci_set_mwi);
4449 
4450 /**
4451  * pci_try_set_mwi - enables memory-write-invalidate PCI transaction
4452  * @dev: the PCI device for which MWI is enabled
4453  *
4454  * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
4455  * Callers are not required to check the return value.
4456  *
4457  * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
4458  */
4459 int pci_try_set_mwi(struct pci_dev *dev)
4460 {
4461 #ifdef PCI_DISABLE_MWI
4462 	return 0;
4463 #else
4464 	return pci_set_mwi(dev);
4465 #endif
4466 }
4467 EXPORT_SYMBOL(pci_try_set_mwi);
4468 
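/*
 * Example (illustrative sketch): MWI is purely a performance hint on
 * conventional PCI, so callers typically fire and forget:
 *
 *	pci_try_set_mwi(pdev);
 */
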
4469 /**
4470  * pci_clear_mwi - disables Memory-Write-Invalidate for device dev
4471  * @dev: the PCI device to disable
4472  *
4473  * Disables PCI Memory-Write-Invalidate transaction on the device
4474  */
4475 void pci_clear_mwi(struct pci_dev *dev)
4476 {
4477 #ifndef PCI_DISABLE_MWI
4478 	u16 cmd;
4479 
4480 	pci_read_config_word(dev, PCI_COMMAND, &cmd);
4481 	if (cmd & PCI_COMMAND_INVALIDATE) {
4482 		cmd &= ~PCI_COMMAND_INVALIDATE;
4483 		pci_write_config_word(dev, PCI_COMMAND, cmd);
4484 	}
4485 #endif
4486 }
4487 EXPORT_SYMBOL(pci_clear_mwi);
4488 
4489 /**
4490  * pci_disable_parity - disable parity checking for device
4491  * @dev: the PCI device to operate on
4492  *
4493  * Disable parity checking for device @dev
4494  */
4495 void pci_disable_parity(struct pci_dev *dev)
4496 {
4497 	u16 cmd;
4498 
4499 	pci_read_config_word(dev, PCI_COMMAND, &cmd);
4500 	if (cmd & PCI_COMMAND_PARITY) {
4501 		cmd &= ~PCI_COMMAND_PARITY;
4502 		pci_write_config_word(dev, PCI_COMMAND, cmd);
4503 	}
4504 }
4505 
4506 /**
4507  * pci_intx - enables/disables PCI INTx for device dev
4508  * @pdev: the PCI device to operate on
4509  * @enable: boolean: whether to enable or disable PCI INTx
4510  *
4511  * Enables/disables PCI INTx for device @pdev
4512  */
4513 void pci_intx(struct pci_dev *pdev, int enable)
4514 {
4515 	u16 pci_command, new;
4516 
4517 	pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
4518 
4519 	if (enable)
4520 		new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
4521 	else
4522 		new = pci_command | PCI_COMMAND_INTX_DISABLE;
4523 
4524 	if (new == pci_command)
4525 		return;
4526 
4527 	pci_write_config_word(pdev, PCI_COMMAND, new);
4528 }
4529 EXPORT_SYMBOL_GPL(pci_intx);
4530 
4531 /**
4532  * pci_wait_for_pending_transaction - wait for pending transaction
4533  * @dev: the PCI device to operate on
4534  *
4535  * Return 0 if a transaction is still pending after the timeout, 1 otherwise.
4536  */
4537 int pci_wait_for_pending_transaction(struct pci_dev *dev)
4538 {
4539 	if (!pci_is_pcie(dev))
4540 		return 1;
4541 
4542 	return pci_wait_for_pending(dev, pci_pcie_cap(dev) + PCI_EXP_DEVSTA,
4543 				    PCI_EXP_DEVSTA_TRPND);
4544 }
4545 EXPORT_SYMBOL(pci_wait_for_pending_transaction);
4546 
4547 /**
4548  * pcie_flr - initiate a PCIe function level reset
4549  * @dev: device to reset
4550  *
4551  * Initiate a Function Level Reset on @dev unconditionally, without
4552  * checking any flags or the Device Capabilities register (DEVCAP).
4553  */
4554 int pcie_flr(struct pci_dev *dev)
4555 {
4556 	if (!pci_wait_for_pending_transaction(dev))
4557 		pci_err(dev, "timed out waiting for pending transaction; performing function level reset anyway\n");
4558 
4559 	pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
4560 
4561 	if (dev->imm_ready)
4562 		return 0;
4563 
4564 	/*
4565 	 * Per PCIe r4.0, sec 6.6.2, a device must complete an FLR within
4566 	 * 100ms, but may silently discard requests while the FLR is in
4567 	 * progress.  Wait 100ms before trying to access the device.
4568 	 */
4569 	msleep(100);
4570 
4571 	return pci_dev_wait(dev, "FLR", PCIE_RESET_READY_POLL_MS);
4572 }
4573 EXPORT_SYMBOL_GPL(pcie_flr);
4574 
4575 /**
4576  * pcie_reset_flr - initiate a PCIe function level reset
4577  * @dev: device to reset
4578  * @probe: if true, return 0 if device can be reset this way
4579  * @probe: if true, return 0 if the device can be reset this way
4580  * Initiate a function level reset on @dev.
4581  */
4582 int pcie_reset_flr(struct pci_dev *dev, bool probe)
4583 {
4584 	if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET)
4585 		return -ENOTTY;
4586 
4587 	if (!(dev->devcap & PCI_EXP_DEVCAP_FLR))
4588 		return -ENOTTY;
4589 
4590 	if (probe)
4591 		return 0;
4592 
4593 	return pcie_flr(dev);
4594 }
4595 EXPORT_SYMBOL_GPL(pcie_reset_flr);
4596 
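/*
 * Example (illustrative sketch): probe for FLR support first, then
 * perform the reset only if the probe succeeds:
 *
 *	if (pcie_reset_flr(pdev, PCI_RESET_PROBE) == 0)
 *		err = pcie_reset_flr(pdev, PCI_RESET_DO_RESET);
 */
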
4597 static int pci_af_flr(struct pci_dev *dev, bool probe)
4598 {
4599 	int pos;
4600 	u8 cap;
4601 
4602 	pos = pci_find_capability(dev, PCI_CAP_ID_AF);
4603 	if (!pos)
4604 		return -ENOTTY;
4605 
4606 	if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET)
4607 		return -ENOTTY;
4608 
4609 	pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap);
4610 	if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
4611 		return -ENOTTY;
4612 
4613 	if (probe)
4614 		return 0;
4615 
4616 	/*
4617 	 * Wait for Transaction Pending bit to clear.  A word-aligned test
4618 	 * is used, so we use the control offset rather than status and shift
4619 	 * the test bit to match.
4620 	 */
4621 	if (!pci_wait_for_pending(dev, pos + PCI_AF_CTRL,
4622 				 PCI_AF_STATUS_TP << 8))
4623 		pci_err(dev, "timed out waiting for pending transaction; performing AF function level reset anyway\n");
4624 
4625 	pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
4626 
4627 	if (dev->imm_ready)
4628 		return 0;
4629 
4630 	/*
4631 	 * Per Advanced Capabilities for Conventional PCI ECN, 13 April 2006,
4632 	 * updated 27 July 2006; a device must complete an FLR within
4633 	 * 100ms, but may silently discard requests while the FLR is in
4634 	 * progress.  Wait 100ms before trying to access the device.
4635 	 */
4636 	msleep(100);
4637 
4638 	return pci_dev_wait(dev, "AF_FLR", PCIE_RESET_READY_POLL_MS);
4639 }
4640 
4641 /**
4642  * pci_pm_reset - Put device into PCI_D3 and back into PCI_D0.
4643  * @dev: Device to reset.
4644  * @probe: if true, return 0 if the device can be reset this way.
4645  *
4646  * If @dev supports native PCI PM and its PCI_PM_CTRL_NO_SOFT_RESET flag is
4647  * unset, it will be reinitialized internally when going from PCI_D3hot to
4648  * PCI_D0.  If that's the case and the device is not in a low-power state
4649  * already, force it into PCI_D3hot and back to PCI_D0, causing it to be reset.
4650  *
4651  * NOTE: This causes the caller to sleep for twice the device power transition
4652  * cooldown period, which for the D0->D3hot and D3hot->D0 transitions is 10 ms
4653  * by default (i.e. unless the @dev's d3hot_delay field has a different value).
4654  * Moreover, only devices in D0 can be reset by this function.
4655  */
4656 static int pci_pm_reset(struct pci_dev *dev, bool probe)
4657 {
4658 	u16 csr;
4659 
4660 	if (!dev->pm_cap || dev->dev_flags & PCI_DEV_FLAGS_NO_PM_RESET)
4661 		return -ENOTTY;
4662 
4663 	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr);
4664 	if (csr & PCI_PM_CTRL_NO_SOFT_RESET)
4665 		return -ENOTTY;
4666 
4667 	if (probe)
4668 		return 0;
4669 
4670 	if (dev->current_state != PCI_D0)
4671 		return -EINVAL;
4672 
4673 	csr &= ~PCI_PM_CTRL_STATE_MASK;
4674 	csr |= PCI_D3hot;
4675 	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
4676 	pci_dev_d3_sleep(dev);
4677 
4678 	csr &= ~PCI_PM_CTRL_STATE_MASK;
4679 	csr |= PCI_D0;
4680 	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
4681 	pci_dev_d3_sleep(dev);
4682 
4683 	return pci_dev_wait(dev, "PM D3hot->D0", PCIE_RESET_READY_POLL_MS);
4684 }
4685 
4686 /**
4687  * pcie_wait_for_link_status - Wait for link status change
4688  * @pdev: Device whose link to wait for.
4689  * @use_lt: Use the LT bit if TRUE, or the DLLLA bit if FALSE.
4690  * @active: Waiting for active or inactive?
4691  *
4692  * Return 0 if successful, or -ETIMEDOUT if status has not changed within
4693  * PCIE_LINK_RETRAIN_TIMEOUT_MS milliseconds.
4694  */
4695 static int pcie_wait_for_link_status(struct pci_dev *pdev,
4696 				     bool use_lt, bool active)
4697 {
4698 	u16 lnksta_mask, lnksta_match;
4699 	unsigned long end_jiffies;
4700 	u16 lnksta;
4701 
4702 	lnksta_mask = use_lt ? PCI_EXP_LNKSTA_LT : PCI_EXP_LNKSTA_DLLLA;
4703 	lnksta_match = active ? lnksta_mask : 0;
4704 
4705 	end_jiffies = jiffies + msecs_to_jiffies(PCIE_LINK_RETRAIN_TIMEOUT_MS);
4706 	do {
4707 		pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnksta);
4708 		if ((lnksta & lnksta_mask) == lnksta_match)
4709 			return 0;
4710 		msleep(1);
4711 	} while (time_before(jiffies, end_jiffies));
4712 
4713 	return -ETIMEDOUT;
4714 }
4715 
4716 /**
4717  * pcie_retrain_link - Request a link retrain and wait for it to complete
4718  * @pdev: Device whose link to retrain.
4719  * @use_lt: Use the LT bit if TRUE, or the DLLLA bit if FALSE, for status.
4720  *
4721  * Retrain completion status is retrieved from the Link Status Register
4722  * according to @use_lt.  It is not verified whether the use of the DLLLA
4723  * bit is valid.
4724  *
4725  * Return 0 if successful, or -ETIMEDOUT if training has not completed
4726  * within PCIE_LINK_RETRAIN_TIMEOUT_MS milliseconds.
4727  */
4728 int pcie_retrain_link(struct pci_dev *pdev, bool use_lt)
4729 {
4730 	int rc;
4731 
4732 	/*
4733 	 * Ensure the updated LNKCTL parameters are used during link
4734 	 * training by checking that there is no ongoing link training that
4735 	 * may have started before link parameters were changed, so as to
4736 	 * avoid LTSSM race as recommended in Implementation Note at the end
4737 	 * of PCIe r6.1 sec 7.5.3.7.
4738 	 */
4739 	rc = pcie_wait_for_link_status(pdev, true, false);
4740 	if (rc)
4741 		return rc;
4742 
4743 	pcie_capability_set_word(pdev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_RL);
4744 	if (pdev->clear_retrain_link) {
4745 		/*
4746 		 * Due to an erratum in some devices the Retrain Link bit
4747 		 * needs to be cleared again manually to allow the link
4748 		 * training to succeed.
4749 		 */
4750 		pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_RL);
4751 	}
4752 
4753 	rc = pcie_wait_for_link_status(pdev, use_lt, !use_lt);
4754 
4755 	/*
4756 	 * Clear LBMS after a manual retrain so that the bit can be used
4757 	 * to track link speed or width changes made by hardware itself
4758 	 * in attempt to correct unreliable link operation.
4759 	 * in an attempt to correct unreliable link operation.
4760 	pcie_reset_lbms_count(pdev);
4761 	return rc;
4762 }
4763 
4764 /**
4765  * pcie_wait_for_link_delay - Wait until link is active or inactive
4766  * @pdev: Bridge device
4767  * @active: waiting for active or inactive?
4768  * @delay: Delay to wait after link has become active (in ms)
4769  *
4770  * Use this to wait until the link becomes active or inactive.
4771  */
4772 static bool pcie_wait_for_link_delay(struct pci_dev *pdev, bool active,
4773 				     int delay)
4774 {
4775 	int rc;
4776 
4777 	/*
4778 	 * Some controllers might not implement link active reporting. In this
4779 	 * case, we wait for 1000 ms + any delay requested by the caller.
4780 	 */
4781 	if (!pdev->link_active_reporting) {
4782 		msleep(PCIE_LINK_RETRAIN_TIMEOUT_MS + delay);
4783 		return true;
4784 	}
4785 
4786 	/*
4787 	 * Per PCIe r4.0 sec 6.6.1, a component must enter LTSSM Detect within
4788 	 * 20 ms, after which we should expect the link to be active if the
4789 	 * reset was successful. If so, software must wait a minimum of 100 ms
4790 	 * before sending configuration requests to devices downstream of this port.
4791 	 *
4792 	 * If the link fails to activate, either the device was physically
4793 	 * removed or the link is permanently failed.
4794 	 */
4795 	if (active)
4796 		msleep(20);
4797 	rc = pcie_wait_for_link_status(pdev, false, active);
4798 	if (active) {
4799 		if (rc)
4800 			rc = pcie_failed_link_retrain(pdev);
4801 		if (rc)
4802 			return false;
4803 
4804 		msleep(delay);
4805 		return true;
4806 	}
4807 
4808 	if (rc)
4809 		return false;
4810 
4811 	return true;
4812 }
4813 
4814 /**
4815  * pcie_wait_for_link - Wait until link is active or inactive
4816  * @pdev: Bridge device
4817  * @active: waiting for active or inactive?
4818  *
4819  * Use this to wait until the link becomes active or inactive.
4820  */
4821 bool pcie_wait_for_link(struct pci_dev *pdev, bool active)
4822 {
4823 	return pcie_wait_for_link_delay(pdev, active, 100);
4824 }
4825 
4826 /*
4827  * Find maximum D3cold delay required by all the devices on the bus.  The
4828  * spec says 100 ms, but firmware can lower it and we allow drivers to
4829  * increase it as well.
4830  *
4831  * Called with @pci_bus_sem locked for reading.
4832  */
4833 static int pci_bus_max_d3cold_delay(const struct pci_bus *bus)
4834 {
4835 	const struct pci_dev *pdev;
4836 	int min_delay = 100;
4837 	int max_delay = 0;
4838 
4839 	list_for_each_entry(pdev, &bus->devices, bus_list) {
4840 		if (pdev->d3cold_delay < min_delay)
4841 			min_delay = pdev->d3cold_delay;
4842 		if (pdev->d3cold_delay > max_delay)
4843 			max_delay = pdev->d3cold_delay;
4844 	}
4845 
4846 	return max(min_delay, max_delay);
4847 }
4848 
4849 /**
4850  * pci_bridge_wait_for_secondary_bus - Wait for secondary bus to be accessible
4851  * @dev: PCI bridge
4852  * @reset_type: reset type in human-readable form
4853  *
4854  * Handle necessary delays before access to the devices on the secondary
4855  * side of the bridge are permitted after D3cold to D0 transition
4856  * or Conventional Reset.
4857  *
4858  * For PCIe this means the delays in PCIe 5.0 section 6.6.1. For
4859  * conventional PCI it means Tpvrh + Trhfa specified in PCI 3.0 section
4860  * 4.3.2.
4861  *
4862  * Return 0 on success or -ENOTTY if the first device on the secondary bus
4863  * failed to become accessible.
4864  */
4865 int pci_bridge_wait_for_secondary_bus(struct pci_dev *dev, char *reset_type)
4866 {
4867 	struct pci_dev *child __free(pci_dev_put) = NULL;
4868 	int delay;
4869 
4870 	if (pci_dev_is_disconnected(dev))
4871 		return 0;
4872 
4873 	if (!pci_is_bridge(dev))
4874 		return 0;
4875 
4876 	down_read(&pci_bus_sem);
4877 
4878 	/*
4879 	 * We only deal with devices that are present currently on the bus.
4880 	 * For any hot-added devices the access delay is handled in pciehp
4881 	 * board_added(). In case of ACPI hotplug the firmware is expected
4882 	 * to configure the devices before OS is notified.
4883 	 */
4884 	if (!dev->subordinate || list_empty(&dev->subordinate->devices)) {
4885 		up_read(&pci_bus_sem);
4886 		return 0;
4887 	}
4888 
4889 	/* Take d3cold_delay requirements into account */
4890 	delay = pci_bus_max_d3cold_delay(dev->subordinate);
4891 	if (!delay) {
4892 		up_read(&pci_bus_sem);
4893 		return 0;
4894 	}
4895 
4896 	child = pci_dev_get(list_first_entry(&dev->subordinate->devices,
4897 					     struct pci_dev, bus_list));
4898 	up_read(&pci_bus_sem);
4899 
4900 	/*
4901 	 * For conventional PCI and PCI-X we need to wait Tpvrh + Trhfa before
4902 	 * accessing the device after reset (that is 1000 ms + 100 ms).
4903 	 */
4904 	if (!pci_is_pcie(dev)) {
4905 		pci_dbg(dev, "waiting %d ms for secondary bus\n", 1000 + delay);
4906 		msleep(1000 + delay);
4907 		return 0;
4908 	}
4909 
4910 	/*
4911 	 * PCIe downstream and root ports that do not support link speeds
4912 	 * greater than 5 GT/s need to wait a minimum of 100 ms. For higher
4913 	 * speeds (Gen3 and above) we first need to wait for the Data Link
4914 	 * Layer to become active.
4915 	 *
4916 	 * However, 100 ms is the minimum and the PCIe spec says the
4917 	 * However, 100 ms is only the minimum, and the PCIe spec says
4918 	 * software must allow at least 1 s before it can determine that a
4919 	 * device that did not respond is broken. A device can also take
4920 	 * longer than that to respond if it indicates so through Request
4921 	 *
4922 	 * Therefore we wait for 100 ms and check for the device presence
4923 	 * until the timeout expires.
4924 	 */
4925 	if (!pcie_downstream_port(dev))
4926 		return 0;
4927 
4928 	if (pcie_get_speed_cap(dev) <= PCIE_SPEED_5_0GT) {
4929 		u16 status;
4930 
4931 		pci_dbg(dev, "waiting %d ms for downstream link\n", delay);
4932 		msleep(delay);
4933 
4934 		if (!pci_dev_wait(child, reset_type, PCI_RESET_WAIT - delay))
4935 			return 0;
4936 
4937 		/*
4938 		 * If the port supports active link reporting we now check
4939 		 * whether the link is active and if not bail out early with
4940 		 * the assumption that the device is not present anymore.
4941 		 */
4942 		if (!dev->link_active_reporting)
4943 			return -ENOTTY;
4944 
4945 		pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &status);
4946 		if (!(status & PCI_EXP_LNKSTA_DLLLA))
4947 			return -ENOTTY;
4948 
4949 		return pci_dev_wait(child, reset_type,
4950 				    PCIE_RESET_READY_POLL_MS - PCI_RESET_WAIT);
4951 	}
4952 
4953 	pci_dbg(dev, "waiting %d ms for downstream link, after activation\n",
4954 		delay);
4955 	if (!pcie_wait_for_link_delay(dev, true, delay)) {
4956 		/* Did not train, no need to wait any further */
4957 		pci_info(dev, "Data Link Layer Link Active not set in 1000 msec\n");
4958 		return -ENOTTY;
4959 	}
4960 
4961 	return pci_dev_wait(child, reset_type,
4962 			    PCIE_RESET_READY_POLL_MS - delay);
4963 }
4964 
4965 void pci_reset_secondary_bus(struct pci_dev *dev)
4966 {
4967 	u16 ctrl;
4968 
4969 	pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &ctrl);
4970 	ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
4971 	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
4972 
4973 	/*
4974 	 * PCI spec v3.0 7.6.4.2 requires minimum Trst of 1ms.  Double
4975 	 * this to 2ms to ensure that we meet the minimum requirement.
4976 	 */
4977 	msleep(2);
4978 
4979 	ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
4980 	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
4981 }
4982 
4983 void __weak pcibios_reset_secondary_bus(struct pci_dev *dev)
4984 {
4985 	pci_reset_secondary_bus(dev);
4986 }
4987 
4988 /**
4989  * pci_bridge_secondary_bus_reset - Reset the secondary bus on a PCI bridge.
4990  * @dev: Bridge device
4991  *
4992  * Use the bridge control register to assert reset on the secondary bus.
4993  * Devices on the secondary bus are left in power-on state.
4994  */
4995 int pci_bridge_secondary_bus_reset(struct pci_dev *dev)
4996 {
4997 	if (!dev->block_cfg_access)
4998 		pci_warn_once(dev, "unlocked secondary bus reset via: %pS\n",
4999 			      __builtin_return_address(0));
5000 	pcibios_reset_secondary_bus(dev);
5001 
5002 	return pci_bridge_wait_for_secondary_bus(dev, "bus reset");
5003 }
5004 EXPORT_SYMBOL_GPL(pci_bridge_secondary_bus_reset);
5005 
5006 static int pci_parent_bus_reset(struct pci_dev *dev, bool probe)
5007 {
5008 	struct pci_dev *pdev;
5009 
5010 	if (pci_is_root_bus(dev->bus) || dev->subordinate ||
5011 	    !dev->bus->self || dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
5012 		return -ENOTTY;
5013 
5014 	list_for_each_entry(pdev, &dev->bus->devices, bus_list)
5015 		if (pdev != dev)
5016 			return -ENOTTY;
5017 
5018 	if (probe)
5019 		return 0;
5020 
5021 	return pci_bridge_secondary_bus_reset(dev->bus->self);
5022 }
5023 
5024 static int pci_reset_hotplug_slot(struct hotplug_slot *hotplug, bool probe)
5025 {
5026 	int rc = -ENOTTY;
5027 
5028 	if (!hotplug || !try_module_get(hotplug->owner))
5029 		return rc;
5030 
5031 	if (hotplug->ops->reset_slot)
5032 		rc = hotplug->ops->reset_slot(hotplug, probe);
5033 
5034 	module_put(hotplug->owner);
5035 
5036 	return rc;
5037 }
5038 
5039 static int pci_dev_reset_slot_function(struct pci_dev *dev, bool probe)
5040 {
5041 	if (dev->multifunction || dev->subordinate || !dev->slot ||
5042 	    dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
5043 		return -ENOTTY;
5044 
5045 	return pci_reset_hotplug_slot(dev->slot->hotplug, probe);
5046 }
5047 
5048 static u16 cxl_port_dvsec(struct pci_dev *dev)
5049 {
5050 	return pci_find_dvsec_capability(dev, PCI_VENDOR_ID_CXL,
5051 					 PCI_DVSEC_CXL_PORT);
5052 }
5053 
5054 static bool cxl_sbr_masked(struct pci_dev *dev)
5055 {
5056 	u16 dvsec, reg;
5057 	int rc;
5058 
5059 	dvsec = cxl_port_dvsec(dev);
5060 	if (!dvsec)
5061 		return false;
5062 
5063 	rc = pci_read_config_word(dev, dvsec + PCI_DVSEC_CXL_PORT_CTL, &reg);
5064 	if (rc || PCI_POSSIBLE_ERROR(reg))
5065 		return false;
5066 
5067 	/*
5068 	 * Per CXL spec r3.1, sec 8.1.5.2, when "Unmask SBR" is 0, the SBR
5069 	 * bit in Bridge Control has no effect.  When 1, the Port generates
5070 	 * hot reset when the SBR bit is set to 1.
5071 	 */
5072 	if (reg & PCI_DVSEC_CXL_PORT_CTL_UNMASK_SBR)
5073 		return false;
5074 
5075 	return true;
5076 }
5077 
5078 static int pci_reset_bus_function(struct pci_dev *dev, bool probe)
5079 {
5080 	struct pci_dev *bridge = pci_upstream_bridge(dev);
5081 	int rc;
5082 
5083 	/*
5084 	 * If "dev" is below a CXL port that has SBR control masked, SBR
5085 	 * won't do anything, so return error.
5086 	 */
5087 	if (bridge && cxl_sbr_masked(bridge)) {
5088 		if (probe)
5089 			return 0;
5090 
5091 		return -ENOTTY;
5092 	}
5093 
5094 	rc = pci_dev_reset_slot_function(dev, probe);
5095 	if (rc != -ENOTTY)
5096 		return rc;
5097 	return pci_parent_bus_reset(dev, probe);
5098 }
5099 
5100 static int cxl_reset_bus_function(struct pci_dev *dev, bool probe)
5101 {
5102 	struct pci_dev *bridge;
5103 	u16 dvsec, reg, val;
5104 	int rc;
5105 
5106 	bridge = pci_upstream_bridge(dev);
5107 	if (!bridge)
5108 		return -ENOTTY;
5109 
5110 	dvsec = cxl_port_dvsec(bridge);
5111 	if (!dvsec)
5112 		return -ENOTTY;
5113 
5114 	if (probe)
5115 		return 0;
5116 
5117 	rc = pci_read_config_word(bridge, dvsec + PCI_DVSEC_CXL_PORT_CTL, &reg);
5118 	if (rc)
5119 		return -ENOTTY;
5120 
5121 	if (reg & PCI_DVSEC_CXL_PORT_CTL_UNMASK_SBR) {
5122 		val = reg;
5123 	} else {
5124 		val = reg | PCI_DVSEC_CXL_PORT_CTL_UNMASK_SBR;
5125 		pci_write_config_word(bridge, dvsec + PCI_DVSEC_CXL_PORT_CTL,
5126 				      val);
5127 	}
5128 
5129 	rc = pci_reset_bus_function(dev, probe);
5130 
5131 	if (reg != val)
5132 		pci_write_config_word(bridge, dvsec + PCI_DVSEC_CXL_PORT_CTL,
5133 				      reg);
5134 
5135 	return rc;
5136 }
5137 
5138 void pci_dev_lock(struct pci_dev *dev)
5139 {
5140 	/* block PM suspend, driver probe, etc. */
5141 	device_lock(&dev->dev);
5142 	pci_cfg_access_lock(dev);
5143 }
5144 EXPORT_SYMBOL_GPL(pci_dev_lock);
5145 
5146 /* Return 1 on successful lock, 0 on contention */
5147 int pci_dev_trylock(struct pci_dev *dev)
5148 {
5149 	if (device_trylock(&dev->dev)) {
5150 		if (pci_cfg_access_trylock(dev))
5151 			return 1;
5152 		device_unlock(&dev->dev);
5153 	}
5154 
5155 	return 0;
5156 }
5157 EXPORT_SYMBOL_GPL(pci_dev_trylock);
5158 
5159 void pci_dev_unlock(struct pci_dev *dev)
5160 {
5161 	pci_cfg_access_unlock(dev);
5162 	device_unlock(&dev->dev);
5163 }
5164 EXPORT_SYMBOL_GPL(pci_dev_unlock);
5165 
5166 static void pci_dev_save_and_disable(struct pci_dev *dev)
5167 {
5168 	const struct pci_error_handlers *err_handler =
5169 			dev->driver ? dev->driver->err_handler : NULL;
5170 
5171 	/*
5172 	 * dev->driver->err_handler->reset_prepare() is protected against
5173 	 * races with ->remove() by the device lock, which must be held by
5174 	 * the caller.
5175 	 */
5176 	if (err_handler && err_handler->reset_prepare)
5177 		err_handler->reset_prepare(dev);
5178 	else if (dev->driver)
5179 		pci_warn(dev, "resetting");
5180 
5181 	/*
5182 	 * Wake-up device prior to save.  PM registers default to D0 after
5183 	 * reset and a simple register restore doesn't reliably return
5184 	 * to a non-D0 state anyway.
5185 	 */
5186 	pci_set_power_state(dev, PCI_D0);
5187 
5188 	pci_save_state(dev);
5189 	/*
5190 	 * Disable the device by clearing the Command register, except for
5191 	 * INTx-disable which is set.  This not only disables MMIO and I/O port
5192 	 * BARs, but also prevents the device from being Bus Master, preventing
5193 	 * DMA from the device including MSI/MSI-X interrupts.  For PCI 2.3
5194 	 * compliant devices, INTx-disable prevents legacy interrupts.
5195 	 */
5196 	pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
5197 }
5198 
5199 static void pci_dev_restore(struct pci_dev *dev)
5200 {
5201 	const struct pci_error_handlers *err_handler =
5202 			dev->driver ? dev->driver->err_handler : NULL;
5203 
5204 	pci_restore_state(dev);
5205 
5206 	/*
5207 	 * dev->driver->err_handler->reset_done() is protected against
5208 	 * races with ->remove() by the device lock, which must be held by
5209 	 * the caller.
5210 	 */
5211 	if (err_handler && err_handler->reset_done)
5212 		err_handler->reset_done(dev);
5213 	else if (dev->driver)
5214 		pci_warn(dev, "reset done");
5215 }
5216 
5217 /* dev->reset_methods[] is a 0-terminated list of indices into this array */
5218 const struct pci_reset_fn_method pci_reset_fn_methods[] = {
5219 	{ },
5220 	{ pci_dev_specific_reset, .name = "device_specific" },
5221 	{ pci_dev_acpi_reset, .name = "acpi" },
5222 	{ pcie_reset_flr, .name = "flr" },
5223 	{ pci_af_flr, .name = "af_flr" },
5224 	{ pci_pm_reset, .name = "pm" },
5225 	{ pci_reset_bus_function, .name = "bus" },
5226 	{ cxl_reset_bus_function, .name = "cxl_bus" },
5227 };
5228 
5229 /**
5230  * __pci_reset_function_locked - reset a PCI device function while holding
5231  * the @dev mutex lock.
5232  * @dev: PCI device to reset
5233  *
5234  * Some devices allow an individual function to be reset without affecting
5235  * other functions in the same device.  The PCI device must be responsive
5236  * to PCI config space in order to use this function.
5237  *
5238  * The device function is presumed to be unused and the caller is holding
5239  * the device mutex lock when this function is called.
5240  *
5241  * Resetting the device will make the contents of PCI configuration space
5242  * random, so any caller of this must be prepared to reinitialise the
5243  * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
5244  * etc.
5245  *
5246  * Returns 0 if the device function was successfully reset or negative if the
5247  * device doesn't support resetting a single function.
5248  */
5249 int __pci_reset_function_locked(struct pci_dev *dev)
5250 {
5251 	int i, m, rc;
5252 	const struct pci_reset_fn_method *method;
5253 
5254 	might_sleep();
5255 
5256 	/*
5257 	 * A reset method returns -ENOTTY if it doesn't support this device and
5258 	 * we should try the next method.
5259 	 *
5260 	 * If it returns 0 (success), we're finished.  If it returns any other
5261 	 * error, we're also finished: this indicates that further reset
5262 	 * mechanisms might be broken on the device.
5263 	 */
5264 	for (i = 0; i < PCI_NUM_RESET_METHODS; i++) {
5265 		m = dev->reset_methods[i];
5266 		if (!m)
5267 			return -ENOTTY;
5268 
5269 		method = &pci_reset_fn_methods[m];
5270 		pci_dbg(dev, "reset via %s\n", method->name);
5271 		rc = method->reset_fn(dev, PCI_RESET_DO_RESET);
5272 		if (!rc)
5273 			return 0;
5274 
5275 		pci_dbg(dev, "%s failed with %d\n", method->name, rc);
5276 		if (rc != -ENOTTY)
5277 			return rc;
5278 	}
5279 
5280 	return -ENOTTY;
5281 }
5282 EXPORT_SYMBOL_GPL(__pci_reset_function_locked);
5283 
5284 /**
5285  * pci_init_reset_methods - check whether device can be safely reset
5286  * and store supported reset mechanisms.
5287  * @dev: PCI device to check for reset mechanisms
5288  *
5289  * Some devices allow an individual function to be reset without affecting
5290  * other functions in the same device.  The PCI device must be in D0-D3hot
5291  * state.
5292  *
5293  * Stores reset mechanisms supported by device in reset_methods byte array
5294  * which is a member of struct pci_dev.
5295  */
5296 void pci_init_reset_methods(struct pci_dev *dev)
5297 {
5298 	int m, i, rc;
5299 
5300 	BUILD_BUG_ON(ARRAY_SIZE(pci_reset_fn_methods) != PCI_NUM_RESET_METHODS);
5301 
5302 	might_sleep();
5303 
5304 	i = 0;
5305 	for (m = 1; m < PCI_NUM_RESET_METHODS; m++) {
5306 		rc = pci_reset_fn_methods[m].reset_fn(dev, PCI_RESET_PROBE);
5307 		if (!rc)
5308 			dev->reset_methods[i++] = m;
5309 		else if (rc != -ENOTTY)
5310 			break;
5311 	}
5312 
5313 	dev->reset_methods[i] = 0;
5314 }
5315 
5316 /**
5317  * pci_reset_function - quiesce and reset a PCI device function
5318  * @dev: PCI device to reset
5319  *
5320  * Some devices allow an individual function to be reset without affecting
5321  * other functions in the same device.  The PCI device must be responsive
5322  * to PCI config space in order to use this function.
5323  *
5324  * This function does not just reset the PCI portion of a device, but
5325  * clears all the state associated with the device.  This function differs
5326  * from __pci_reset_function_locked() in that it saves and restores device state
5327  * over the reset and takes the PCI device lock.
5328  *
5329  * Returns 0 if the device function was successfully reset or negative if the
5330  * device doesn't support resetting a single function.
5331  */
5332 int pci_reset_function(struct pci_dev *dev)
5333 {
5334 	struct pci_dev *bridge;
5335 	int rc;
5336 
5337 	if (!pci_reset_supported(dev))
5338 		return -ENOTTY;
5339 
5340 	/*
5341 	 * If there's no upstream bridge, no locking is needed since there is
5342 	 * no upstream bridge configuration to hold consistent.
5343 	 */
5344 	bridge = pci_upstream_bridge(dev);
5345 	if (bridge)
5346 		pci_dev_lock(bridge);
5347 
5348 	pci_dev_lock(dev);
5349 	pci_dev_save_and_disable(dev);
5350 
5351 	rc = __pci_reset_function_locked(dev);
5352 
5353 	pci_dev_restore(dev);
5354 	pci_dev_unlock(dev);
5355 
5356 	if (bridge)
5357 		pci_dev_unlock(bridge);
5358 
5359 	return rc;
5360 }
5361 EXPORT_SYMBOL_GPL(pci_reset_function);
5362 
5363 /**
5364  * pci_reset_function_locked - quiesce and reset a PCI device function
5365  * @dev: PCI device to reset
5366  *
5367  * Some devices allow an individual function to be reset without affecting
5368  * other functions in the same device.  The PCI device must be responsive
5369  * to PCI config space in order to use this function.
5370  *
5371  * This function does not just reset the PCI portion of a device, but
5372  * clears all the state associated with the device.  This function differs
5373  * from __pci_reset_function_locked() in that it saves and restores device state
5374  * over the reset.  It also differs from pci_reset_function() in that it
5375  * requires the PCI device lock to be held.
5376  *
5377  * Returns 0 if the device function was successfully reset or negative if the
5378  * device doesn't support resetting a single function.
5379  */
5380 int pci_reset_function_locked(struct pci_dev *dev)
5381 {
5382 	int rc;
5383 
5384 	if (!pci_reset_supported(dev))
5385 		return -ENOTTY;
5386 
5387 	pci_dev_save_and_disable(dev);
5388 
5389 	rc = __pci_reset_function_locked(dev);
5390 
5391 	pci_dev_restore(dev);
5392 
5393 	return rc;
5394 }
5395 EXPORT_SYMBOL_GPL(pci_reset_function_locked);
5396 
5397 /**
5398  * pci_try_reset_function - quiesce and reset a PCI device function
5399  * @dev: PCI device to reset
5400  *
5401  * Same as above, except return -EAGAIN if unable to lock device.
5402  */
5403 int pci_try_reset_function(struct pci_dev *dev)
5404 {
5405 	int rc;
5406 
5407 	if (!pci_reset_supported(dev))
5408 		return -ENOTTY;
5409 
5410 	if (!pci_dev_trylock(dev))
5411 		return -EAGAIN;
5412 
5413 	pci_dev_save_and_disable(dev);
5414 	rc = __pci_reset_function_locked(dev);
5415 	pci_dev_restore(dev);
5416 	pci_dev_unlock(dev);
5417 
5418 	return rc;
5419 }
5420 EXPORT_SYMBOL_GPL(pci_try_reset_function);
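
/*
 * Example (editor's sketch): pci_try_reset_function() fits paths that must
 * not block waiting for the device lock.  One plausible pattern, with the
 * retry policy left to the caller:
 *
 *	ret = pci_try_reset_function(pdev);
 *	if (ret == -EAGAIN)
 *		return ret;	// device lock contended; caller may retry
 */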
5421 
5422 /* Do any devices on or below this bus prevent a bus reset? */
5423 static bool pci_bus_resettable(struct pci_bus *bus)
5424 {
5425 	struct pci_dev *dev;
5428 	if (bus->self && (bus->self->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET))
5429 		return false;
5430 
5431 	list_for_each_entry(dev, &bus->devices, bus_list) {
5434 		if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
5435 		    (dev->subordinate && !pci_bus_resettable(dev->subordinate)))
5436 			return false;
5437 	}
5438 
5439 	return true;
5440 }
5441 
5442 /* Lock devices from the top of the tree down */
5443 static void pci_bus_lock(struct pci_bus *bus)
5444 {
5445 	struct pci_dev *dev;
5446 
5447 	pci_dev_lock(bus->self);
5448 	list_for_each_entry(dev, &bus->devices, bus_list) {
5449 		if (dev->subordinate)
5450 			pci_bus_lock(dev->subordinate);
5451 		else
5452 			pci_dev_lock(dev);
5453 	}
5454 }
5455 
5456 /* Unlock devices from the bottom of the tree up */
5457 static void pci_bus_unlock(struct pci_bus *bus)
5458 {
5459 	struct pci_dev *dev;
5460 
5461 	list_for_each_entry(dev, &bus->devices, bus_list) {
5462 		if (dev->subordinate)
5463 			pci_bus_unlock(dev->subordinate);
5464 		else
5465 			pci_dev_unlock(dev);
5466 	}
5467 	pci_dev_unlock(bus->self);
5468 }
5469 
5470 /* Return 1 on successful lock, 0 on contention */
5471 static int pci_bus_trylock(struct pci_bus *bus)
5472 {
5473 	struct pci_dev *dev;
5474 
5475 	if (!pci_dev_trylock(bus->self))
5476 		return 0;
5477 
5478 	list_for_each_entry(dev, &bus->devices, bus_list) {
5479 		if (dev->subordinate) {
5480 			if (!pci_bus_trylock(dev->subordinate))
5481 				goto unlock;
5482 		} else if (!pci_dev_trylock(dev))
5483 			goto unlock;
5484 	}
5485 	return 1;
5486 
5487 unlock:
5488 	list_for_each_entry_continue_reverse(dev, &bus->devices, bus_list) {
5489 		if (dev->subordinate)
5490 			pci_bus_unlock(dev->subordinate);
5491 		else
5492 			pci_dev_unlock(dev);
5493 	}
5494 	pci_dev_unlock(bus->self);
5495 	return 0;
5496 }
5497 
5498 /* Do any devices on or below this slot prevent a bus reset? */
5499 static bool pci_slot_resettable(struct pci_slot *slot)
5500 {
5501 	struct pci_dev *dev;
5502 
5503 	if (slot->bus->self &&
5504 	    (slot->bus->self->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET))
5505 		return false;
5506 
5507 	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5508 		if (!dev->slot || dev->slot != slot)
5509 			continue;
5512 		if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
5513 		    (dev->subordinate && !pci_bus_resettable(dev->subordinate)))
5514 			return false;
5515 	}
5516 
5517 	return true;
5518 }
5519 
5520 /* Lock devices from the top of the tree down */
5521 static void pci_slot_lock(struct pci_slot *slot)
5522 {
5523 	struct pci_dev *dev;
5524 
5525 	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5526 		if (!dev->slot || dev->slot != slot)
5527 			continue;
5528 		if (dev->subordinate)
5529 			pci_bus_lock(dev->subordinate);
5530 		else
5531 			pci_dev_lock(dev);
5532 	}
5533 }
5534 
5535 /* Unlock devices from the bottom of the tree up */
5536 static void pci_slot_unlock(struct pci_slot *slot)
5537 {
5538 	struct pci_dev *dev;
5539 
5540 	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5541 		if (!dev->slot || dev->slot != slot)
5542 			continue;
		if (dev->subordinate)
			pci_bus_unlock(dev->subordinate);
		else
			pci_dev_unlock(dev);
5546 	}
5547 }
5548 
5549 /* Return 1 on successful lock, 0 on contention */
5550 static int pci_slot_trylock(struct pci_slot *slot)
5551 {
5552 	struct pci_dev *dev;
5553 
5554 	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5555 		if (!dev->slot || dev->slot != slot)
5556 			continue;
		if (dev->subordinate) {
			if (!pci_bus_trylock(dev->subordinate))
				goto unlock;
5562 		} else if (!pci_dev_trylock(dev))
5563 			goto unlock;
5564 	}
5565 	return 1;
5566 
5567 unlock:
5568 	list_for_each_entry_continue_reverse(dev,
5569 					     &slot->bus->devices, bus_list) {
5570 		if (!dev->slot || dev->slot != slot)
5571 			continue;
5572 		if (dev->subordinate)
5573 			pci_bus_unlock(dev->subordinate);
5574 		else
5575 			pci_dev_unlock(dev);
5576 	}
5577 	return 0;
5578 }
5579 
5580 /*
5581  * Save and disable devices from the top of the tree down while holding
5582  * the @dev mutex lock for the entire tree.
5583  */
5584 static void pci_bus_save_and_disable_locked(struct pci_bus *bus)
5585 {
5586 	struct pci_dev *dev;
5587 
5588 	list_for_each_entry(dev, &bus->devices, bus_list) {
5589 		pci_dev_save_and_disable(dev);
5590 		if (dev->subordinate)
5591 			pci_bus_save_and_disable_locked(dev->subordinate);
5592 	}
5593 }
5594 
5595 /*
5596  * Restore devices from top of the tree down while holding @dev mutex lock
5597  * for the entire tree.  Parent bridges need to be restored before we can
5598  * get to subordinate devices.
5599  */
5600 static void pci_bus_restore_locked(struct pci_bus *bus)
5601 {
5602 	struct pci_dev *dev;
5603 
5604 	list_for_each_entry(dev, &bus->devices, bus_list) {
5605 		pci_dev_restore(dev);
5606 		if (dev->subordinate) {
5607 			pci_bridge_wait_for_secondary_bus(dev, "bus reset");
5608 			pci_bus_restore_locked(dev->subordinate);
5609 		}
5610 	}
5611 }
5612 
5613 /*
5614  * Save and disable devices from the top of the tree down while holding
5615  * the @dev mutex lock for the entire tree.
5616  */
5617 static void pci_slot_save_and_disable_locked(struct pci_slot *slot)
5618 {
5619 	struct pci_dev *dev;
5620 
5621 	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5622 		if (!dev->slot || dev->slot != slot)
5623 			continue;
5624 		pci_dev_save_and_disable(dev);
5625 		if (dev->subordinate)
5626 			pci_bus_save_and_disable_locked(dev->subordinate);
5627 	}
5628 }
5629 
5630 /*
5631  * Restore devices from top of the tree down while holding @dev mutex lock
5632  * for the entire tree.  Parent bridges need to be restored before we can
5633  * get to subordinate devices.
5634  */
5635 static void pci_slot_restore_locked(struct pci_slot *slot)
5636 {
5637 	struct pci_dev *dev;
5638 
5639 	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5640 		if (!dev->slot || dev->slot != slot)
5641 			continue;
5642 		pci_dev_restore(dev);
5643 		if (dev->subordinate) {
5644 			pci_bridge_wait_for_secondary_bus(dev, "slot reset");
5645 			pci_bus_restore_locked(dev->subordinate);
5646 		}
5647 	}
5648 }
5649 
5650 static int pci_slot_reset(struct pci_slot *slot, bool probe)
5651 {
5652 	int rc;
5653 
5654 	if (!slot || !pci_slot_resettable(slot))
5655 		return -ENOTTY;
5656 
5657 	if (!probe)
5658 		pci_slot_lock(slot);
5659 
5660 	might_sleep();
5661 
5662 	rc = pci_reset_hotplug_slot(slot->hotplug, probe);
5663 
5664 	if (!probe)
5665 		pci_slot_unlock(slot);
5666 
5667 	return rc;
5668 }
5669 
5670 /**
5671  * pci_probe_reset_slot - probe whether a PCI slot can be reset
5672  * @slot: PCI slot to probe
5673  *
5674  * Return 0 if slot can be reset, negative if a slot reset is not supported.
5675  */
5676 int pci_probe_reset_slot(struct pci_slot *slot)
5677 {
5678 	return pci_slot_reset(slot, PCI_RESET_PROBE);
5679 }
5680 EXPORT_SYMBOL_GPL(pci_probe_reset_slot);
5681 
5682 /**
5683  * __pci_reset_slot - Try to reset a PCI slot
5684  * @slot: PCI slot to reset
5685  *
5686  * A PCI bus may host multiple slots, each slot may support a reset mechanism
5687  * independent of other slots.  For instance, some slots may support slot power
5688  * control.  In the case of a 1:1 bus to slot architecture, this function may
5689  * wrap the bus reset to avoid spurious slot related events such as hotplug.
 * Generally a slot reset should be attempted before a bus reset.  All of the
 * functions of the slot and any subordinate buses behind the slot are reset
5692  * through this function.  PCI config space of all devices in the slot and
5693  * behind the slot is saved before and restored after reset.
5694  *
5695  * Same as above except return -EAGAIN if the slot cannot be locked
5696  */
5697 static int __pci_reset_slot(struct pci_slot *slot)
5698 {
5699 	int rc;
5700 
5701 	rc = pci_slot_reset(slot, PCI_RESET_PROBE);
5702 	if (rc)
5703 		return rc;
5704 
5705 	if (pci_slot_trylock(slot)) {
5706 		pci_slot_save_and_disable_locked(slot);
5707 		might_sleep();
5708 		rc = pci_reset_hotplug_slot(slot->hotplug, PCI_RESET_DO_RESET);
5709 		pci_slot_restore_locked(slot);
5710 		pci_slot_unlock(slot);
5711 	} else
5712 		rc = -EAGAIN;
5713 
5714 	return rc;
5715 }
5716 
5717 static int pci_bus_reset(struct pci_bus *bus, bool probe)
5718 {
5719 	int ret;
5720 
5721 	if (!bus->self || !pci_bus_resettable(bus))
5722 		return -ENOTTY;
5723 
5724 	if (probe)
5725 		return 0;
5726 
5727 	pci_bus_lock(bus);
5728 
5729 	might_sleep();
5730 
5731 	ret = pci_bridge_secondary_bus_reset(bus->self);
5732 
5733 	pci_bus_unlock(bus);
5734 
5735 	return ret;
5736 }
5737 
5738 /**
5739  * pci_bus_error_reset - reset the bridge's subordinate bus
5740  * @bridge: The parent device that connects to the bus to reset
5741  *
5742  * This function will first try to reset the slots on this bus if the method is
5743  * available. If slot reset fails or is not available, this will fall back to a
5744  * secondary bus reset.
5745  */
5746 int pci_bus_error_reset(struct pci_dev *bridge)
5747 {
5748 	struct pci_bus *bus = bridge->subordinate;
5749 	struct pci_slot *slot;
5750 
5751 	if (!bus)
5752 		return -ENOTTY;
5753 
5754 	mutex_lock(&pci_slot_mutex);
5755 	if (list_empty(&bus->slots))
5756 		goto bus_reset;
5757 
5758 	list_for_each_entry(slot, &bus->slots, list)
5759 		if (pci_probe_reset_slot(slot))
5760 			goto bus_reset;
5761 
5762 	list_for_each_entry(slot, &bus->slots, list)
5763 		if (pci_slot_reset(slot, PCI_RESET_DO_RESET))
5764 			goto bus_reset;
5765 
5766 	mutex_unlock(&pci_slot_mutex);
5767 	return 0;
5768 bus_reset:
5769 	mutex_unlock(&pci_slot_mutex);
5770 	return pci_bus_reset(bridge->subordinate, PCI_RESET_DO_RESET);
5771 }
5772 
5773 /**
5774  * pci_probe_reset_bus - probe whether a PCI bus can be reset
5775  * @bus: PCI bus to probe
5776  *
5777  * Return 0 if bus can be reset, negative if a bus reset is not supported.
5778  */
5779 int pci_probe_reset_bus(struct pci_bus *bus)
5780 {
5781 	return pci_bus_reset(bus, PCI_RESET_PROBE);
5782 }
5783 EXPORT_SYMBOL_GPL(pci_probe_reset_bus);
5784 
5785 /**
5786  * __pci_reset_bus - Try to reset a PCI bus
5787  * @bus: top level PCI bus to reset
5788  *
5789  * Same as above except return -EAGAIN if the bus cannot be locked
5790  */
5791 int __pci_reset_bus(struct pci_bus *bus)
5792 {
5793 	int rc;
5794 
5795 	rc = pci_bus_reset(bus, PCI_RESET_PROBE);
5796 	if (rc)
5797 		return rc;
5798 
5799 	if (pci_bus_trylock(bus)) {
5800 		pci_bus_save_and_disable_locked(bus);
5801 		might_sleep();
5802 		rc = pci_bridge_secondary_bus_reset(bus->self);
5803 		pci_bus_restore_locked(bus);
5804 		pci_bus_unlock(bus);
5805 	} else
5806 		rc = -EAGAIN;
5807 
5808 	return rc;
5809 }
5810 
5811 /**
5812  * pci_reset_bus - Try to reset a PCI bus
5813  * @pdev: top level PCI device to reset via slot/bus
5814  *
5815  * Same as above except return -EAGAIN if the bus cannot be locked
5816  */
5817 int pci_reset_bus(struct pci_dev *pdev)
5818 {
5819 	return (!pci_probe_reset_slot(pdev->slot)) ?
5820 	    __pci_reset_slot(pdev->slot) : __pci_reset_bus(pdev->bus);
5821 }
5822 EXPORT_SYMBOL_GPL(pci_reset_bus);
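
/*
 * Example (editor's sketch): recover a multifunction device or a whole
 * sub-hierarchy that has no usable function-level reset:
 *
 *	ret = pci_reset_bus(pdev);	// slot reset if supported, else bus
 *	if (ret == -EAGAIN)
 *		return ret;		// hierarchy lock contended; retry later
 */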
5823 
5824 /**
5825  * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count
5826  * @dev: PCI device to query
5827  *
5828  * Returns mmrbc: maximum designed memory read count in bytes or
5829  * appropriate error value.
5830  */
5831 int pcix_get_max_mmrbc(struct pci_dev *dev)
5832 {
5833 	int cap;
5834 	u32 stat;
5835 
5836 	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
5837 	if (!cap)
5838 		return -EINVAL;
5839 
5840 	if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
5841 		return -EINVAL;
5842 
5843 	return 512 << FIELD_GET(PCI_X_STATUS_MAX_READ, stat);
5844 }
5845 EXPORT_SYMBOL(pcix_get_max_mmrbc);
5846 
5847 /**
5848  * pcix_get_mmrbc - get PCI-X maximum memory read byte count
5849  * @dev: PCI device to query
5850  *
5851  * Returns mmrbc: maximum memory read count in bytes or appropriate error
5852  * value.
5853  */
5854 int pcix_get_mmrbc(struct pci_dev *dev)
5855 {
5856 	int cap;
5857 	u16 cmd;
5858 
5859 	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
5860 	if (!cap)
5861 		return -EINVAL;
5862 
5863 	if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
5864 		return -EINVAL;
5865 
5866 	return 512 << FIELD_GET(PCI_X_CMD_MAX_READ, cmd);
5867 }
5868 EXPORT_SYMBOL(pcix_get_mmrbc);
5869 
5870 /**
5871  * pcix_set_mmrbc - set PCI-X maximum memory read byte count
 * @dev: PCI device to configure
 * @mmrbc: maximum memory read count in bytes
 *    valid values are 512, 1024, 2048, 4096
 *
 * If possible, sets the maximum memory read byte count; some bridges have
 * errata that prevent this.
5878  */
5879 int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
5880 {
5881 	int cap;
5882 	u32 stat, v, o;
5883 	u16 cmd;
5884 
5885 	if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc))
5886 		return -EINVAL;
5887 
5888 	v = ffs(mmrbc) - 10;
5889 
5890 	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
5891 	if (!cap)
5892 		return -EINVAL;
5893 
5894 	if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
5895 		return -EINVAL;
5896 
5897 	if (v > FIELD_GET(PCI_X_STATUS_MAX_READ, stat))
5898 		return -E2BIG;
5899 
5900 	if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
5901 		return -EINVAL;
5902 
5903 	o = FIELD_GET(PCI_X_CMD_MAX_READ, cmd);
5904 	if (o != v) {
5905 		if (v > o && (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC))
5906 			return -EIO;
5907 
5908 		cmd &= ~PCI_X_CMD_MAX_READ;
5909 		cmd |= FIELD_PREP(PCI_X_CMD_MAX_READ, v);
5910 		if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd))
5911 			return -EIO;
5912 	}
5913 	return 0;
5914 }
5915 EXPORT_SYMBOL(pcix_set_mmrbc);
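
/*
 * Worked example for the encoding above (editor's note): the MMRBC field
 * stores log2(mmrbc) - 9.  For mmrbc = 2048: ffs(2048) = 12, so v = 2,
 * and decoding gives 512 << 2 = 2048 again.  A request above the designed
 * maximum read size from PCI_X_STATUS fails with -E2BIG.
 */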
5916 
5917 /**
5918  * pcie_get_readrq - get PCI Express read request size
5919  * @dev: PCI device to query
5920  *
5921  * Returns maximum memory read request in bytes or appropriate error value.
5922  */
5923 int pcie_get_readrq(struct pci_dev *dev)
5924 {
5925 	u16 ctl;
5926 
5927 	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
5928 
5929 	return 128 << FIELD_GET(PCI_EXP_DEVCTL_READRQ, ctl);
5930 }
5931 EXPORT_SYMBOL(pcie_get_readrq);
5932 
5933 /**
5934  * pcie_set_readrq - set PCI Express maximum memory read request
 * @dev: PCI device to configure
 * @rq: maximum memory read count in bytes
 *    valid values are 128, 256, 512, 1024, 2048, 4096
 *
 * If possible, sets the maximum memory read request size in bytes.
5940  */
5941 int pcie_set_readrq(struct pci_dev *dev, int rq)
5942 {
5943 	u16 v;
5944 	int ret;
5945 	struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);
5946 
5947 	if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
5948 		return -EINVAL;
5949 
5950 	/*
5951 	 * If using the "performance" PCIe config, we clamp the read rq
5952 	 * size to the max packet size to keep the host bridge from
5953 	 * generating requests larger than we can cope with.
5954 	 */
5955 	if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
5956 		int mps = pcie_get_mps(dev);
5957 
5958 		if (mps < rq)
5959 			rq = mps;
5960 	}
5961 
5962 	v = FIELD_PREP(PCI_EXP_DEVCTL_READRQ, ffs(rq) - 8);
5963 
5964 	if (bridge->no_inc_mrrs) {
5965 		int max_mrrs = pcie_get_readrq(dev);
5966 
5967 		if (rq > max_mrrs) {
5968 			pci_info(dev, "can't set Max_Read_Request_Size to %d; max is %d\n", rq, max_mrrs);
5969 			return -EINVAL;
5970 		}
5971 	}
5972 
5973 	ret = pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
5974 						  PCI_EXP_DEVCTL_READRQ, v);
5975 
5976 	return pcibios_err_to_errno(ret);
5977 }
5978 EXPORT_SYMBOL(pcie_set_readrq);
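
/*
 * Example (editor's sketch): a driver issuing many small DMA reads might
 * trade per-request size for fairness by shrinking its MRRS; values
 * outside 128..4096 or non-powers-of-two are rejected:
 *
 *	ret = pcie_set_readrq(pdev, 256);
 *	if (ret)
 *		pci_warn(pdev, "keeping MRRS at %d\n", pcie_get_readrq(pdev));
 */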
5979 
5980 /**
5981  * pcie_get_mps - get PCI Express maximum payload size
5982  * @dev: PCI device to query
5983  *
5984  * Returns maximum payload size in bytes
5985  */
5986 int pcie_get_mps(struct pci_dev *dev)
5987 {
5988 	u16 ctl;
5989 
5990 	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
5991 
5992 	return 128 << FIELD_GET(PCI_EXP_DEVCTL_PAYLOAD, ctl);
5993 }
5994 EXPORT_SYMBOL(pcie_get_mps);
5995 
5996 /**
5997  * pcie_set_mps - set PCI Express maximum payload size
 * @dev: PCI device to configure
 * @mps: maximum payload size in bytes
 *    valid values are 128, 256, 512, 1024, 2048, 4096
 *
 * If possible, sets the maximum payload size.
6003  */
6004 int pcie_set_mps(struct pci_dev *dev, int mps)
6005 {
6006 	u16 v;
6007 	int ret;
6008 
6009 	if (mps < 128 || mps > 4096 || !is_power_of_2(mps))
6010 		return -EINVAL;
6011 
6012 	v = ffs(mps) - 8;
6013 	if (v > dev->pcie_mpss)
6014 		return -EINVAL;
6015 	v = FIELD_PREP(PCI_EXP_DEVCTL_PAYLOAD, v);
6016 
6017 	ret = pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
6018 						  PCI_EXP_DEVCTL_PAYLOAD, v);
6019 
6020 	return pcibios_err_to_errno(ret);
6021 }
6022 EXPORT_SYMBOL(pcie_set_mps);
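
/*
 * Worked example for the DEVCTL encoding (editor's note): the payload
 * field stores log2(mps) - 7.  For mps = 256: ffs(256) = 9, v = 1, and
 * pcie_get_mps() decodes it as 128 << 1 = 256.  A value above the
 * device's capability ceiling (dev->pcie_mpss) is rejected with -EINVAL.
 */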
6023 
6024 static enum pci_bus_speed to_pcie_link_speed(u16 lnksta)
6025 {
6026 	return pcie_link_speed[FIELD_GET(PCI_EXP_LNKSTA_CLS, lnksta)];
6027 }
6028 
6029 int pcie_link_speed_mbps(struct pci_dev *pdev)
6030 {
6031 	u16 lnksta;
6032 	int err;
6033 
6034 	err = pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnksta);
6035 	if (err)
6036 		return err;
6037 
6038 	return pcie_dev_speed_mbps(to_pcie_link_speed(lnksta));
6039 }
6040 EXPORT_SYMBOL(pcie_link_speed_mbps);
6041 
6042 /**
6043  * pcie_bandwidth_available - determine minimum link settings of a PCIe
6044  *			      device and its bandwidth limitation
6045  * @dev: PCI device to query
6046  * @limiting_dev: storage for device causing the bandwidth limitation
6047  * @speed: storage for speed of limiting device
6048  * @width: storage for width of limiting device
6049  *
6050  * Walk up the PCI device chain and find the point where the minimum
6051  * bandwidth is available.  Return the bandwidth available there and (if
6052  * limiting_dev, speed, and width pointers are supplied) information about
6053  * that point.  The bandwidth returned is in Mb/s, i.e., megabits/second of
6054  * raw bandwidth.
6055  */
6056 u32 pcie_bandwidth_available(struct pci_dev *dev, struct pci_dev **limiting_dev,
6057 			     enum pci_bus_speed *speed,
6058 			     enum pcie_link_width *width)
6059 {
6060 	u16 lnksta;
6061 	enum pci_bus_speed next_speed;
6062 	enum pcie_link_width next_width;
6063 	u32 bw, next_bw;
6064 
6065 	if (speed)
6066 		*speed = PCI_SPEED_UNKNOWN;
6067 	if (width)
6068 		*width = PCIE_LNK_WIDTH_UNKNOWN;
6069 
6070 	bw = 0;
6071 
6072 	while (dev) {
6073 		pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
6074 
6075 		next_speed = to_pcie_link_speed(lnksta);
6076 		next_width = FIELD_GET(PCI_EXP_LNKSTA_NLW, lnksta);
6077 
6078 		next_bw = next_width * PCIE_SPEED2MBS_ENC(next_speed);
6079 
6080 		/* Check if current device limits the total bandwidth */
6081 		if (!bw || next_bw <= bw) {
6082 			bw = next_bw;
6083 
6084 			if (limiting_dev)
6085 				*limiting_dev = dev;
6086 			if (speed)
6087 				*speed = next_speed;
6088 			if (width)
6089 				*width = next_width;
6090 		}
6091 
6092 		dev = pci_upstream_bridge(dev);
6093 	}
6094 
6095 	return bw;
6096 }
6097 EXPORT_SYMBOL(pcie_bandwidth_available);
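
/*
 * Example (editor's sketch): log the bottleneck above a device.
 *
 *	enum pci_bus_speed speed;
 *	enum pcie_link_width width;
 *	struct pci_dev *lim = NULL;
 *	u32 mbps = pcie_bandwidth_available(pdev, &lim, &speed, &width);
 *
 *	if (lim && lim != pdev)
 *		pci_info(pdev, "limited to %u Mb/s by %s\n", mbps,
 *			 pci_name(lim));
 */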
6098 
6099 /**
6100  * pcie_get_supported_speeds - query Supported Link Speed Vector
6101  * @dev: PCI device to query
6102  *
6103  * Query @dev supported link speeds.
6104  *
6105  * Implementation Note in PCIe r6.0 sec 7.5.3.18 recommends determining
6106  * supported link speeds using the Supported Link Speeds Vector in the Link
6107  * Capabilities 2 Register (when available).
6108  *
6109  * Link Capabilities 2 was added in PCIe r3.0, sec 7.8.18.
6110  *
 * Without Link Capabilities 2, i.e., prior to PCIe r3.0, the Supported Link
 * Speeds field in Link Capabilities is used, and only the 2.5 GT/s and
 * 5.0 GT/s speeds were defined.
 *
 * For a @dev without the Supported Link Speeds Vector, the vector is
 * synthesized from the Max Link Speed field in the Link Capabilities
 * Register.
6117  *
6118  * Return: Supported Link Speeds Vector (+ reserved 0 at LSB).
6119  */
6120 u8 pcie_get_supported_speeds(struct pci_dev *dev)
6121 {
6122 	u32 lnkcap2, lnkcap;
6123 	u8 speeds;
6124 
6125 	/*
6126 	 * Speeds retain the reserved 0 at LSB before PCIe Supported Link
6127 	 * Speeds Vector to allow using SLS Vector bit defines directly.
6128 	 */
6129 	pcie_capability_read_dword(dev, PCI_EXP_LNKCAP2, &lnkcap2);
6130 	speeds = lnkcap2 & PCI_EXP_LNKCAP2_SLS;
6131 
6132 	/* Ignore speeds higher than Max Link Speed */
6133 	pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
6134 	speeds &= GENMASK(lnkcap & PCI_EXP_LNKCAP_SLS, 0);
6135 
6136 	/* PCIe r3.0-compliant */
6137 	if (speeds)
6138 		return speeds;
6139 
6140 	/* Synthesize from the Max Link Speed field */
6141 	if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_5_0GB)
6142 		speeds = PCI_EXP_LNKCAP2_SLS_5_0GB | PCI_EXP_LNKCAP2_SLS_2_5GB;
6143 	else if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_2_5GB)
6144 		speeds = PCI_EXP_LNKCAP2_SLS_2_5GB;
6145 
6146 	return speeds;
6147 }
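
/*
 * Worked example (editor's note): if Link Capabilities reports Max Link
 * Speed 3 (8.0 GT/s), GENMASK(3, 0) = 0b1111, so stray 16.0/32.0 GT/s
 * bits in a buggy Link Capabilities 2 register are masked off while bits
 * 1..3 (2.5, 5.0 and 8.0 GT/s) survive.
 */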
6148 
6149 /**
6150  * pcie_get_speed_cap - query for the PCI device's link speed capability
6151  * @dev: PCI device to query
6152  *
6153  * Query the PCI device speed capability.
6154  *
6155  * Return: the maximum link speed supported by the device.
6156  */
6157 enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev)
6158 {
6159 	return PCIE_LNKCAP2_SLS2SPEED(dev->supported_speeds);
6160 }
6161 EXPORT_SYMBOL(pcie_get_speed_cap);
6162 
6163 /**
6164  * pcie_get_width_cap - query for the PCI device's link width capability
6165  * @dev: PCI device to query
6166  *
6167  * Query the PCI device width capability.  Return the maximum link width
6168  * supported by the device.
6169  */
6170 enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev)
6171 {
6172 	u32 lnkcap;
6173 
6174 	pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
6175 	if (lnkcap)
6176 		return FIELD_GET(PCI_EXP_LNKCAP_MLW, lnkcap);
6177 
6178 	return PCIE_LNK_WIDTH_UNKNOWN;
6179 }
6180 EXPORT_SYMBOL(pcie_get_width_cap);
6181 
6182 /**
6183  * pcie_bandwidth_capable - calculate a PCI device's link bandwidth capability
6184  * @dev: PCI device
6185  * @speed: storage for link speed
6186  * @width: storage for link width
6187  *
6188  * Calculate a PCI device's link bandwidth by querying for its link speed
6189  * and width, multiplying them, and applying encoding overhead.  The result
6190  * is in Mb/s, i.e., megabits/second of raw bandwidth.
6191  */
6192 static u32 pcie_bandwidth_capable(struct pci_dev *dev,
6193 				  enum pci_bus_speed *speed,
6194 				  enum pcie_link_width *width)
6195 {
6196 	*speed = pcie_get_speed_cap(dev);
6197 	*width = pcie_get_width_cap(dev);
6198 
6199 	if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN)
6200 		return 0;
6201 
6202 	return *width * PCIE_SPEED2MBS_ENC(*speed);
6203 }
6204 
6205 /**
6206  * __pcie_print_link_status - Report the PCI device's link speed and width
6207  * @dev: PCI device to query
6208  * @verbose: Print info even when enough bandwidth is available
6209  *
6210  * If the available bandwidth at the device is less than the device is
6211  * capable of, report the device's maximum possible bandwidth and the
6212  * upstream link that limits its performance.  If @verbose, always print
6213  * the available bandwidth, even if the device isn't constrained.
6214  */
6215 void __pcie_print_link_status(struct pci_dev *dev, bool verbose)
6216 {
6217 	enum pcie_link_width width, width_cap;
6218 	enum pci_bus_speed speed, speed_cap;
6219 	struct pci_dev *limiting_dev = NULL;
6220 	u32 bw_avail, bw_cap;
6221 	char *flit_mode = "";
6222 
6223 	bw_cap = pcie_bandwidth_capable(dev, &speed_cap, &width_cap);
6224 	bw_avail = pcie_bandwidth_available(dev, &limiting_dev, &speed, &width);
6225 
6226 	if (dev->bus && dev->bus->flit_mode)
6227 		flit_mode = ", in Flit mode";
6228 
6229 	if (bw_avail >= bw_cap && verbose)
6230 		pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth (%s x%d link)%s\n",
6231 			 bw_cap / 1000, bw_cap % 1000,
6232 			 pci_speed_string(speed_cap), width_cap, flit_mode);
6233 	else if (bw_avail < bw_cap)
6234 		pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth, limited by %s x%d link at %s (capable of %u.%03u Gb/s with %s x%d link)%s\n",
6235 			 bw_avail / 1000, bw_avail % 1000,
6236 			 pci_speed_string(speed), width,
6237 			 limiting_dev ? pci_name(limiting_dev) : "<unknown>",
6238 			 bw_cap / 1000, bw_cap % 1000,
6239 			 pci_speed_string(speed_cap), width_cap, flit_mode);
6240 }
6241 
6242 /**
6243  * pcie_print_link_status - Report the PCI device's link speed and width
6244  * @dev: PCI device to query
6245  *
6246  * Report the available bandwidth at the device.
6247  */
6248 void pcie_print_link_status(struct pci_dev *dev)
6249 {
6250 	__pcie_print_link_status(dev, true);
6251 }
6252 EXPORT_SYMBOL(pcie_print_link_status);
6253 
6254 /**
6255  * pci_select_bars - Make BAR mask from the type of resource
6256  * @dev: the PCI device for which BAR mask is made
6257  * @flags: resource type mask to be selected
6258  *
 * This helper routine builds a BAR mask from the given resource type flags.
6260  */
6261 int pci_select_bars(struct pci_dev *dev, unsigned long flags)
6262 {
	int i, bars = 0;

	for (i = 0; i < PCI_NUM_RESOURCES; i++)
6265 		if (pci_resource_flags(dev, i) & flags)
6266 			bars |= (1 << i);
6267 	return bars;
6268 }
6269 EXPORT_SYMBOL(pci_select_bars);
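
/*
 * Example (editor's sketch): pair pci_select_bars() with
 * pci_request_selected_regions() to claim only the MMIO BARs ("foo" is a
 * hypothetical driver name):
 *
 *	int bars = pci_select_bars(pdev, IORESOURCE_MEM);
 *
 *	ret = pci_request_selected_regions(pdev, bars, "foo");
 *	if (ret)
 *		return ret;	// a selected BAR is already claimed
 */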
6270 
6271 /* Some architectures require additional programming to enable VGA */
6272 static arch_set_vga_state_t arch_set_vga_state;
6273 
6274 void __init pci_register_set_vga_state(arch_set_vga_state_t func)
6275 {
6276 	arch_set_vga_state = func;	/* NULL disables */
6277 }
6278 
6279 static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
6280 				  unsigned int command_bits, u32 flags)
6281 {
6282 	if (arch_set_vga_state)
6283 		return arch_set_vga_state(dev, decode, command_bits,
6284 						flags);
6285 	return 0;
6286 }
6287 
6288 /**
6289  * pci_set_vga_state - set VGA decode state on device and parents if requested
6290  * @dev: the PCI device
6291  * @decode: true = enable decoding, false = disable decoding
6292  * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY
6293  * @flags: traverse ancestors and change bridges
6294  * CHANGE_BRIDGE_ONLY / CHANGE_BRIDGE
6295  */
6296 int pci_set_vga_state(struct pci_dev *dev, bool decode,
6297 		      unsigned int command_bits, u32 flags)
6298 {
6299 	struct pci_bus *bus;
6300 	struct pci_dev *bridge;
6301 	u16 cmd;
6302 	int rc;
6303 
6304 	WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) && (command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY)));
6305 
6306 	/* ARCH specific VGA enables */
6307 	rc = pci_set_vga_state_arch(dev, decode, command_bits, flags);
6308 	if (rc)
6309 		return rc;
6310 
6311 	if (flags & PCI_VGA_STATE_CHANGE_DECODES) {
6312 		pci_read_config_word(dev, PCI_COMMAND, &cmd);
6313 		if (decode)
6314 			cmd |= command_bits;
6315 		else
6316 			cmd &= ~command_bits;
6317 		pci_write_config_word(dev, PCI_COMMAND, cmd);
6318 	}
6319 
6320 	if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE))
6321 		return 0;
6322 
6323 	bus = dev->bus;
6324 	while (bus) {
6325 		bridge = bus->self;
6326 		if (bridge) {
6327 			pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
6328 					     &cmd);
6329 			if (decode)
6330 				cmd |= PCI_BRIDGE_CTL_VGA;
6331 			else
6332 				cmd &= ~PCI_BRIDGE_CTL_VGA;
6333 			pci_write_config_word(bridge, PCI_BRIDGE_CONTROL,
6334 					      cmd);
6335 		}
6336 		bus = bus->parent;
6337 	}
6338 	return 0;
6339 }
6340 
6341 #ifdef CONFIG_ACPI
6342 bool pci_pr3_present(struct pci_dev *pdev)
6343 {
6344 	struct acpi_device *adev;
6345 
6346 	if (acpi_disabled)
6347 		return false;
6348 
6349 	adev = ACPI_COMPANION(&pdev->dev);
6350 	if (!adev)
6351 		return false;
6352 
6353 	return adev->power.flags.power_resources &&
6354 		acpi_has_method(adev->handle, "_PR3");
6355 }
6356 EXPORT_SYMBOL_GPL(pci_pr3_present);
6357 #endif
6358 
6359 /**
6360  * pci_add_dma_alias - Add a DMA devfn alias for a device
6361  * @dev: the PCI device for which alias is added
6362  * @devfn_from: alias slot and function
6363  * @nr_devfns: number of subsequent devfns to alias
6364  *
6365  * This helper encodes an 8-bit devfn as a bit number in dma_alias_mask
6366  * which is used to program permissible bus-devfn source addresses for DMA
6367  * requests in an IOMMU.  These aliases factor into IOMMU group creation
6368  * and are useful for devices generating DMA requests beyond or different
6369  * from their logical bus-devfn.  Examples include device quirks where the
6370  * device simply uses the wrong devfn, as well as non-transparent bridges
6371  * where the alias may be a proxy for devices in another domain.
6372  *
6373  * IOMMU group creation is performed during device discovery or addition,
6374  * prior to any potential DMA mapping and therefore prior to driver probing
6375  * (especially for userspace assigned devices where IOMMU group definition
6376  * cannot be left as a userspace activity).  DMA aliases should therefore
6377  * be configured via quirks, such as the PCI fixup header quirk.
6378  */
6379 void pci_add_dma_alias(struct pci_dev *dev, u8 devfn_from,
6380 		       unsigned int nr_devfns)
6381 {
6382 	int devfn_to;
6383 
6384 	nr_devfns = min(nr_devfns, (unsigned int)MAX_NR_DEVFNS - devfn_from);
6385 	devfn_to = devfn_from + nr_devfns - 1;
6386 
6387 	if (!dev->dma_alias_mask)
6388 		dev->dma_alias_mask = bitmap_zalloc(MAX_NR_DEVFNS, GFP_KERNEL);
6389 	if (!dev->dma_alias_mask) {
6390 		pci_warn(dev, "Unable to allocate DMA alias mask\n");
6391 		return;
6392 	}
6393 
6394 	bitmap_set(dev->dma_alias_mask, devfn_from, nr_devfns);
6395 
6396 	if (nr_devfns == 1)
6397 		pci_info(dev, "Enabling fixed DMA alias to %02x.%d\n",
6398 				PCI_SLOT(devfn_from), PCI_FUNC(devfn_from));
6399 	else if (nr_devfns > 1)
6400 		pci_info(dev, "Enabling fixed DMA alias for devfn range from %02x.%d to %02x.%d\n",
6401 				PCI_SLOT(devfn_from), PCI_FUNC(devfn_from),
6402 				PCI_SLOT(devfn_to), PCI_FUNC(devfn_to));
6403 }
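
/*
 * Example (editor's sketch): per the comment above, aliases are normally
 * added from a header fixup quirk.  The IDs and alias below are made up:
 *
 *	static void foo_dma_alias(struct pci_dev *pdev)
 *	{
 *		pci_add_dma_alias(pdev, PCI_DEVFN(0x10, 0), 1);
 *	}
 *	DECLARE_PCI_FIXUP_HEADER(0x1234, 0x5678, foo_dma_alias);
 */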
6404 
6405 bool pci_devs_are_dma_aliases(struct pci_dev *dev1, struct pci_dev *dev2)
6406 {
6407 	return (dev1->dma_alias_mask &&
6408 		test_bit(dev2->devfn, dev1->dma_alias_mask)) ||
6409 	       (dev2->dma_alias_mask &&
6410 		test_bit(dev1->devfn, dev2->dma_alias_mask)) ||
6411 	       pci_real_dma_dev(dev1) == dev2 ||
6412 	       pci_real_dma_dev(dev2) == dev1;
6413 }
6414 
6415 bool pci_device_is_present(struct pci_dev *pdev)
6416 {
6417 	u32 v;
6418 
6419 	/* Check PF if pdev is a VF, since VF Vendor/Device IDs are 0xffff */
6420 	pdev = pci_physfn(pdev);
6421 	if (pci_dev_is_disconnected(pdev))
6422 		return false;
6423 	return pci_bus_read_dev_vendor_id(pdev->bus, pdev->devfn, &v, 0);
6424 }
6425 EXPORT_SYMBOL_GPL(pci_device_is_present);
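
/*
 * Example (editor's sketch): a driver timeout handler can tell a hung
 * device from a surprise-removed one:
 *
 *	if (!pci_device_is_present(pdev))
 *		return -ENODEV;		// device gone; skip HW recovery
 */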
6426 
6427 void pci_ignore_hotplug(struct pci_dev *dev)
6428 {
6429 	struct pci_dev *bridge = dev->bus->self;
6430 
6431 	dev->ignore_hotplug = 1;
6432 	/* Propagate the "ignore hotplug" setting to the parent bridge. */
6433 	if (bridge)
6434 		bridge->ignore_hotplug = 1;
6435 }
6436 EXPORT_SYMBOL_GPL(pci_ignore_hotplug);
6437 
6438 /**
6439  * pci_real_dma_dev - Get PCI DMA device for PCI device
6440  * @dev: the PCI device that may have a PCI DMA alias
6441  *
6442  * Permits the platform to provide architecture-specific functionality to
6443  * devices needing to alias DMA to another PCI device on another PCI bus. If
6444  * the PCI device is on the same bus, it is recommended to use
6445  * pci_add_dma_alias(). This is the default implementation. Architecture
6446  * implementations can override this.
6447  */
6448 struct pci_dev __weak *pci_real_dma_dev(struct pci_dev *dev)
6449 {
6450 	return dev;
6451 }
6452 
6453 resource_size_t __weak pcibios_default_alignment(void)
6454 {
6455 	return 0;
6456 }
6457 
6458 /*
6459  * Arches that don't want to expose struct resource to userland as-is in
6460  * sysfs and /proc can implement their own pci_resource_to_user().
6461  */
6462 void __weak pci_resource_to_user(const struct pci_dev *dev, int bar,
6463 				 const struct resource *rsrc,
6464 				 resource_size_t *start, resource_size_t *end)
6465 {
6466 	*start = rsrc->start;
6467 	*end = rsrc->end;
6468 }
6469 
6470 static char *resource_alignment_param;
6471 static DEFINE_SPINLOCK(resource_alignment_lock);
6472 
6473 /**
6474  * pci_specified_resource_alignment - get resource alignment specified by user.
6475  * @dev: the PCI device to get
6476  * @resize: whether or not to change resources' size when reassigning alignment
6477  *
6478  * RETURNS: Resource alignment if it is specified.
6479  *          Zero if it is not specified.
6480  */
6481 static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev,
6482 							bool *resize)
6483 {
6484 	int align_order, count;
6485 	resource_size_t align = pcibios_default_alignment();
6486 	const char *p;
6487 	int ret;
6488 
6489 	spin_lock(&resource_alignment_lock);
6490 	p = resource_alignment_param;
6491 	if (!p || !*p)
6492 		goto out;
6493 	if (pci_has_flag(PCI_PROBE_ONLY)) {
6494 		align = 0;
6495 		pr_info_once("PCI: Ignoring requested alignments (PCI_PROBE_ONLY)\n");
6496 		goto out;
6497 	}
6498 
6499 	while (*p) {
6500 		count = 0;
6501 		if (sscanf(p, "%d%n", &align_order, &count) == 1 &&
6502 		    p[count] == '@') {
6503 			p += count + 1;
6504 			if (align_order > 63) {
6505 				pr_err("PCI: Invalid requested alignment (order %d)\n",
6506 				       align_order);
6507 				align_order = PAGE_SHIFT;
6508 			}
6509 		} else {
6510 			align_order = PAGE_SHIFT;
6511 		}
6512 
6513 		ret = pci_dev_str_match(dev, p, &p);
6514 		if (ret == 1) {
6515 			*resize = true;
6516 			align = 1ULL << align_order;
6517 			break;
6518 		} else if (ret < 0) {
6519 			pr_err("PCI: Can't parse resource_alignment parameter: %s\n",
6520 			       p);
6521 			break;
6522 		}
6523 
6524 		if (*p != ';' && *p != ',') {
6525 			/* End of param or invalid format */
6526 			break;
6527 		}
6528 		p++;
6529 	}
6530 out:
6531 	spin_unlock(&resource_alignment_lock);
6532 	return align;
6533 }
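
/*
 * Example (editor's note): with the parser above,
 * "pci=resource_alignment=20@pci:8086:1234" requests 2^20 (1 MiB)
 * alignment for devices matching vendor 8086, device 1234, while
 * "pci=resource_alignment=pci:8086:1234" defaults the order to
 * PAGE_SHIFT.  Multiple entries are separated by ';' or ','.
 */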
6534 
6535 static void pci_request_resource_alignment(struct pci_dev *dev, int bar,
6536 					   resource_size_t align, bool resize)
6537 {
6538 	struct resource *r = &dev->resource[bar];
6539 	const char *r_name = pci_resource_name(dev, bar);
6540 	resource_size_t size;
6541 
6542 	if (!(r->flags & IORESOURCE_MEM))
6543 		return;
6544 
6545 	if (r->flags & IORESOURCE_PCI_FIXED) {
6546 		pci_info(dev, "%s %pR: ignoring requested alignment %#llx\n",
6547 			 r_name, r, (unsigned long long)align);
6548 		return;
6549 	}
6550 
6551 	size = resource_size(r);
6552 	if (size >= align)
6553 		return;
6554 
6555 	/*
6556 	 * Increase the alignment of the resource.  There are two ways we
6557 	 * can do this:
6558 	 *
6559 	 * 1) Increase the size of the resource.  BARs are aligned on their
6560 	 *    size, so when we reallocate space for this resource, we'll
6561 	 *    allocate it with the larger alignment.  This also prevents
6562 	 *    assignment of any other BARs inside the alignment region, so
6563 	 *    if we're requesting page alignment, this means no other BARs
6564 	 *    will share the page.
6565 	 *
6566 	 *    The disadvantage is that this makes the resource larger than
6567 	 *    the hardware BAR, which may break drivers that compute things
6568 	 *    based on the resource size, e.g., to find registers at a
6569 	 *    fixed offset before the end of the BAR.
6570 	 *
6571 	 * 2) Retain the resource size, but use IORESOURCE_STARTALIGN and
6572 	 *    set r->start to the desired alignment.  By itself this
6573 	 *    doesn't prevent other BARs being put inside the alignment
6574 	 *    region, but if we realign *every* resource of every device in
6575 	 *    the system, none of them will share an alignment region.
6576 	 *
6577 	 * When the user has requested alignment for only some devices via
6578 	 * the "pci=resource_alignment" argument, "resize" is true and we
6579 	 * use the first method.  Otherwise we assume we're aligning all
6580 	 * devices and we use the second.
6581 	 */
6582 
6583 	pci_info(dev, "%s %pR: requesting alignment to %#llx\n",
6584 		 r_name, r, (unsigned long long)align);
6585 
6586 	if (resize) {
6587 		r->start = 0;
6588 		r->end = align - 1;
6589 	} else {
6590 		r->flags &= ~IORESOURCE_SIZEALIGN;
6591 		r->flags |= IORESOURCE_STARTALIGN;
6592 		resource_set_range(r, align, size);
6593 	}
6594 	r->flags |= IORESOURCE_UNSET;
6595 }
6596 
/*
 * This function disables memory decoding and releases the memory resources
 * of a device specified by the kernel boot parameter
 * 'pci=resource_alignment='.  It also rounds the resource sizes up to the
 * specified alignment.  Later on, the kernel will reassign suitably aligned
 * resources back to the device.
 */
6604 void pci_reassigndev_resource_alignment(struct pci_dev *dev)
6605 {
6606 	int i;
6607 	struct resource *r;
6608 	resource_size_t align;
6609 	u16 command;
6610 	bool resize = false;
6611 
6612 	/*
6613 	 * VF BARs are read-only zero according to SR-IOV spec r1.1, sec
6614 	 * 3.4.1.11.  Their resources are allocated from the space
6615 	 * described by the VF BARx register in the PF's SR-IOV capability.
6616 	 * We can't influence their alignment here.
6617 	 */
6618 	if (dev->is_virtfn)
6619 		return;
6620 
6621 	/* check if specified PCI is target device to reassign */
6622 	align = pci_specified_resource_alignment(dev, &resize);
6623 	if (!align)
6624 		return;
6625 
6626 	if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
6627 	    (dev->class >> 8) == PCI_CLASS_BRIDGE_HOST) {
6628 		pci_warn(dev, "Can't reassign resources to host bridge\n");
6629 		return;
6630 	}
6631 
6632 	pci_read_config_word(dev, PCI_COMMAND, &command);
6633 	command &= ~PCI_COMMAND_MEMORY;
6634 	pci_write_config_word(dev, PCI_COMMAND, command);
6635 
6636 	for (i = 0; i <= PCI_ROM_RESOURCE; i++)
6637 		pci_request_resource_alignment(dev, i, align, resize);
6638 
6639 	/*
6640 	 * Need to disable bridge's resource window,
6641 	 * to enable the kernel to reassign new resource
6642 	 * window later on.
6643 	 */
6644 	if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
6645 		for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
6646 			r = &dev->resource[i];
6647 			if (!(r->flags & IORESOURCE_MEM))
6648 				continue;
6649 			r->flags |= IORESOURCE_UNSET;
6650 			r->end = resource_size(r) - 1;
6651 			r->start = 0;
6652 		}
6653 		pci_disable_bridge_window(dev);
6654 	}
6655 }
6656 
6657 static ssize_t resource_alignment_show(const struct bus_type *bus, char *buf)
6658 {
6659 	size_t count = 0;
6660 
6661 	spin_lock(&resource_alignment_lock);
6662 	if (resource_alignment_param)
6663 		count = sysfs_emit(buf, "%s\n", resource_alignment_param);
6664 	spin_unlock(&resource_alignment_lock);
6665 
6666 	return count;
6667 }
6668 
6669 static ssize_t resource_alignment_store(const struct bus_type *bus,
6670 					const char *buf, size_t count)
6671 {
6672 	char *param, *old, *end;
6673 
6674 	if (count >= (PAGE_SIZE - 1))
6675 		return -EINVAL;
6676 
6677 	param = kstrndup(buf, count, GFP_KERNEL);
6678 	if (!param)
6679 		return -ENOMEM;
6680 
6681 	end = strchr(param, '\n');
6682 	if (end)
6683 		*end = '\0';
6684 
6685 	spin_lock(&resource_alignment_lock);
6686 	old = resource_alignment_param;
6687 	if (strlen(param)) {
6688 		resource_alignment_param = param;
6689 	} else {
6690 		kfree(param);
6691 		resource_alignment_param = NULL;
6692 	}
6693 	spin_unlock(&resource_alignment_lock);
6694 
6695 	kfree(old);
6696 
6697 	return count;
6698 }
6699 
6700 static BUS_ATTR_RW(resource_alignment);
6701 
6702 static int __init pci_resource_alignment_sysfs_init(void)
6703 {
6704 	return bus_create_file(&pci_bus_type,
6705 					&bus_attr_resource_alignment);
6706 }
6707 late_initcall(pci_resource_alignment_sysfs_init);
6708 
6709 static void pci_no_domains(void)
6710 {
6711 #ifdef CONFIG_PCI_DOMAINS
6712 	pci_domains_supported = 0;
6713 #endif
6714 }
6715 
6716 #ifdef CONFIG_PCI_DOMAINS_GENERIC
6717 static DEFINE_IDA(pci_domain_nr_static_ida);
6718 static DEFINE_IDA(pci_domain_nr_dynamic_ida);
6719 
6720 static void of_pci_reserve_static_domain_nr(void)
6721 {
6722 	struct device_node *np;
6723 	int domain_nr;
6724 
6725 	for_each_node_by_type(np, "pci") {
6726 		domain_nr = of_get_pci_domain_nr(np);
6727 		if (domain_nr < 0)
6728 			continue;
		/*
		 * Permanently allocate domain_nr in dynamic_ida so that it
		 * can never also be handed out as a dynamic domain number.
		 */
6732 		 */
6733 		ida_alloc_range(&pci_domain_nr_dynamic_ida,
6734 				domain_nr, domain_nr, GFP_KERNEL);
6735 	}
6736 }
6737 
6738 static int of_pci_bus_find_domain_nr(struct device *parent)
6739 {
6740 	static bool static_domains_reserved = false;
6741 	int domain_nr;
6742 
6743 	/* On the first call scan device tree for static allocations. */
6744 	if (!static_domains_reserved) {
6745 		of_pci_reserve_static_domain_nr();
6746 		static_domains_reserved = true;
6747 	}
6748 
6749 	if (parent) {
6750 		/*
6751 		 * If domain is in DT, allocate it in static IDA.  This
6752 		 * prevents duplicate static allocations in case of errors
6753 		 * in DT.
6754 		 */
6755 		domain_nr = of_get_pci_domain_nr(parent->of_node);
6756 		if (domain_nr >= 0)
6757 			return ida_alloc_range(&pci_domain_nr_static_ida,
6758 					       domain_nr, domain_nr,
6759 					       GFP_KERNEL);
6760 	}
6761 
6762 	/*
6763 	 * If domain was not specified in DT, choose a free ID from dynamic
6764 	 * allocations. All domain numbers from DT are permanently in
6765 	 * dynamic allocations to prevent assigning them to other DT nodes
6766 	 * without static domain.
6767 	 */
6768 	return ida_alloc(&pci_domain_nr_dynamic_ida, GFP_KERNEL);
6769 }
6770 
6771 static void of_pci_bus_release_domain_nr(struct device *parent, int domain_nr)
6772 {
6773 	if (domain_nr < 0)
6774 		return;
6775 
6776 	/* Release domain from IDA where it was allocated. */
6777 	if (of_get_pci_domain_nr(parent->of_node) == domain_nr)
6778 		ida_free(&pci_domain_nr_static_ida, domain_nr);
6779 	else
6780 		ida_free(&pci_domain_nr_dynamic_ida, domain_nr);
6781 }
6782 
6783 int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent)
6784 {
6785 	return acpi_disabled ? of_pci_bus_find_domain_nr(parent) :
6786 			       acpi_pci_bus_find_domain_nr(bus);
6787 }
6788 
6789 void pci_bus_release_domain_nr(struct device *parent, int domain_nr)
6790 {
6791 	if (!acpi_disabled)
6792 		return;
6793 	of_pci_bus_release_domain_nr(parent, domain_nr);
6794 }
6795 #endif
6796 
6797 /**
6798  * pci_ext_cfg_avail - can we access extended PCI config space?
6799  *
6800  * Returns 1 if we can access PCI extended config space (offsets
6801  * greater than 0xff). This is the default implementation. Architecture
6802  * implementations can override this.
6803  */
6804 int __weak pci_ext_cfg_avail(void)
6805 {
6806 	return 1;
6807 }
6808 
6809 void __weak pci_fixup_cardbus(struct pci_bus *bus)
6810 {
6811 }
6812 EXPORT_SYMBOL(pci_fixup_cardbus);
6813 
6814 static int __init pci_setup(char *str)
6815 {
6816 	while (str) {
6817 		char *k = strchr(str, ',');
6818 		if (k)
6819 			*k++ = 0;
6820 		if (*str && (str = pcibios_setup(str)) && *str) {
6821 			if (!strcmp(str, "nomsi")) {
6822 				pci_no_msi();
6823 			} else if (!strncmp(str, "noats", 5)) {
6824 				pr_info("PCIe: ATS is disabled\n");
6825 				pcie_ats_disabled = true;
6826 			} else if (!strcmp(str, "noaer")) {
6827 				pci_no_aer();
6828 			} else if (!strcmp(str, "earlydump")) {
6829 				pci_early_dump = true;
6830 			} else if (!strncmp(str, "realloc=", 8)) {
6831 				pci_realloc_get_opt(str + 8);
6832 			} else if (!strncmp(str, "realloc", 7)) {
6833 				pci_realloc_get_opt("on");
6834 			} else if (!strcmp(str, "nodomains")) {
6835 				pci_no_domains();
6836 			} else if (!strncmp(str, "noari", 5)) {
6837 				pcie_ari_disabled = true;
6838 			} else if (!strncmp(str, "notph", 5)) {
6839 				pci_no_tph();
6840 			} else if (!strncmp(str, "cbiosize=", 9)) {
6841 				pci_cardbus_io_size = memparse(str + 9, &str);
6842 			} else if (!strncmp(str, "cbmemsize=", 10)) {
6843 				pci_cardbus_mem_size = memparse(str + 10, &str);
6844 			} else if (!strncmp(str, "resource_alignment=", 19)) {
6845 				resource_alignment_param = str + 19;
6846 			} else if (!strncmp(str, "ecrc=", 5)) {
6847 				pcie_ecrc_get_policy(str + 5);
6848 			} else if (!strncmp(str, "hpiosize=", 9)) {
6849 				pci_hotplug_io_size = memparse(str + 9, &str);
6850 			} else if (!strncmp(str, "hpmmiosize=", 11)) {
6851 				pci_hotplug_mmio_size = memparse(str + 11, &str);
6852 			} else if (!strncmp(str, "hpmmioprefsize=", 15)) {
6853 				pci_hotplug_mmio_pref_size = memparse(str + 15, &str);
6854 			} else if (!strncmp(str, "hpmemsize=", 10)) {
6855 				pci_hotplug_mmio_size = memparse(str + 10, &str);
6856 				pci_hotplug_mmio_pref_size = pci_hotplug_mmio_size;
6857 			} else if (!strncmp(str, "hpbussize=", 10)) {
6858 				pci_hotplug_bus_size =
6859 					simple_strtoul(str + 10, &str, 0);
6860 				if (pci_hotplug_bus_size > 0xff)
6861 					pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE;
6862 			} else if (!strncmp(str, "pcie_bus_tune_off", 17)) {
6863 				pcie_bus_config = PCIE_BUS_TUNE_OFF;
6864 			} else if (!strncmp(str, "pcie_bus_safe", 13)) {
6865 				pcie_bus_config = PCIE_BUS_SAFE;
6866 			} else if (!strncmp(str, "pcie_bus_perf", 13)) {
6867 				pcie_bus_config = PCIE_BUS_PERFORMANCE;
6868 			} else if (!strncmp(str, "pcie_bus_peer2peer", 18)) {
6869 				pcie_bus_config = PCIE_BUS_PEER2PEER;
6870 			} else if (!strncmp(str, "pcie_scan_all", 13)) {
6871 				pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
6872 			} else if (!strncmp(str, "disable_acs_redir=", 18)) {
6873 				disable_acs_redir_param = str + 18;
6874 			} else if (!strncmp(str, "config_acs=", 11)) {
6875 				config_acs_param = str + 11;
6876 			} else {
6877 				pr_err("PCI: Unknown option `%s'\n", str);
6878 			}
6879 		}
6880 		str = k;
6881 	}
6882 	return 0;
6883 }
6884 early_param("pci", pci_setup);
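
/*
 * Example (editor's note): options are comma-separated, so
 * "pci=nomsi,realloc=on,hpmemsize=128M" disables MSI, enables resource
 * reallocation, and (per the handling above) sets both the
 * non-prefetchable and prefetchable hotplug MMIO sizes to 128M.
 */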
6885 
/*
 * 'resource_alignment_param', 'disable_acs_redir_param' and
 * 'config_acs_param' are initialized in pci_setup(), above, to point to
 * data in the __initdata section which will be freed after the init
 * sequence is complete.  We can't allocate memory in pci_setup() because
 * some architectures do not have any memory allocation service available
 * during an early_param() call.  So we allocate memory and copy the
 * variables here before the init section is freed.
 */
6895 static int __init pci_realloc_setup_params(void)
6896 {
6897 	resource_alignment_param = kstrdup(resource_alignment_param,
6898 					   GFP_KERNEL);
6899 	disable_acs_redir_param = kstrdup(disable_acs_redir_param, GFP_KERNEL);
6900 	config_acs_param = kstrdup(config_acs_param, GFP_KERNEL);
6901 
6902 	return 0;
6903 }
6904 pure_initcall(pci_realloc_setup_params);
6905