// SPDX-License-Identifier: GPL-2.0
/*
 * PCI Bus Services, see include/linux/pci.h for further explanation.
 *
 * Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
 * David Mosberger-Tang
 *
 * Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz>
 */

#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/dmi.h>
#include <linux/init.h>
#include <linux/iommu.h>
#include <linux/lockdep.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/log2.h>
#include <linux/logic_pio.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <linux/pci-ats.h>
#include <linux/pci_hotplug.h>
#include <linux/vmalloc.h>
#include <asm/dma.h>
#include <linux/aer.h>
#include <linux/bitfield.h>
#include "pci.h"

DEFINE_MUTEX(pci_slot_mutex);

const char *pci_power_names[] = {
	"error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown",
};
EXPORT_SYMBOL_GPL(pci_power_names);

#ifdef CONFIG_X86_32
int isa_dma_bridge_buggy;
EXPORT_SYMBOL(isa_dma_bridge_buggy);
#endif

int pci_pci_problems;
EXPORT_SYMBOL(pci_pci_problems);

unsigned int pci_pm_d3hot_delay;

static void pci_pme_list_scan(struct work_struct *work);

static LIST_HEAD(pci_pme_list);
static DEFINE_MUTEX(pci_pme_list_mutex);
static DECLARE_DELAYED_WORK(pci_pme_work, pci_pme_list_scan);

struct pci_pme_device {
	struct list_head list;
	struct pci_dev *dev;
};

#define PME_TIMEOUT 1000 /* How long between PME checks */

/*
 * Following exit from Conventional Reset, devices must be ready within 1 sec
 * (PCIe r6.0 sec 6.6.1).  A D3cold to D0 transition implies a Conventional
 * Reset (PCIe r6.0 sec 5.8).
 */
#define PCI_RESET_WAIT 1000 /* msec */

/*
 * Devices may extend the 1 sec period through Request Retry Status
 * completions (PCIe r6.0 sec 2.3.1).  The spec does not provide an upper
 * limit, but 60 sec ought to be enough for any device to become
 * responsive.
 */
#define PCIE_RESET_READY_POLL_MS 60000 /* msec */

static void pci_dev_d3_sleep(struct pci_dev *dev)
{
	unsigned int delay_ms = max(dev->d3hot_delay, pci_pm_d3hot_delay);
	unsigned int upper;

	if (delay_ms) {
		/* Use a 20% upper bound, 1ms minimum */
		upper = max(DIV_ROUND_CLOSEST(delay_ms, 5), 1U);
		usleep_range(delay_ms * USEC_PER_MSEC,
			     (delay_ms + upper) * USEC_PER_MSEC);
	}
}

bool pci_reset_supported(struct pci_dev *dev)
{
	return dev->reset_methods[0] != 0;
}

#ifdef CONFIG_PCI_DOMAINS
int pci_domains_supported = 1;
#endif

#define DEFAULT_HOTPLUG_IO_SIZE		(256)
#define DEFAULT_HOTPLUG_MMIO_SIZE	(2*1024*1024)
#define DEFAULT_HOTPLUG_MMIO_PREF_SIZE	(2*1024*1024)
/* hpiosize=nn can override this */
unsigned long pci_hotplug_io_size  = DEFAULT_HOTPLUG_IO_SIZE;
/*
 * pci=hpmmiosize=nnM overrides non-prefetchable MMIO size,
 * pci=hpmmioprefsize=nnM overrides prefetchable MMIO size;
 * pci=hpmemsize=nnM overrides both
 */
unsigned long pci_hotplug_mmio_size = DEFAULT_HOTPLUG_MMIO_SIZE;
unsigned long pci_hotplug_mmio_pref_size = DEFAULT_HOTPLUG_MMIO_PREF_SIZE;

#define DEFAULT_HOTPLUG_BUS_SIZE	1
unsigned long pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE;


/* PCIe MPS/MRRS strategy; can be overridden by kernel command-line param */
#ifdef CONFIG_PCIE_BUS_TUNE_OFF
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_TUNE_OFF;
#elif defined CONFIG_PCIE_BUS_SAFE
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_SAFE;
#elif defined CONFIG_PCIE_BUS_PERFORMANCE
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_PERFORMANCE;
#elif defined CONFIG_PCIE_BUS_PEER2PEER
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_PEER2PEER;
#else
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_DEFAULT;
#endif

/*
 * The default CLS is used if arch didn't set CLS explicitly and not
 * all pci devices agree on the same value.  Arch can override either
 * the dfl or actual value as it sees fit.  Don't forget this is
 * measured in 32-bit words, not bytes.
 */
u8 pci_dfl_cache_line_size __ro_after_init = L1_CACHE_BYTES >> 2;
u8 pci_cache_line_size __ro_after_init;

/*
 * If we set up a device for bus mastering, we need to check the latency
 * timer as certain BIOSes forget to set it properly.
 */
unsigned int pcibios_max_latency = 255;

/* If set, the PCIe ARI capability will not be used. */
static bool pcie_ari_disabled;

/* If set, the PCIe ATS capability will not be used. */
static bool pcie_ats_disabled;

/* If set, the PCI config space of each device is printed during boot. */
bool pci_early_dump;

bool pci_ats_disabled(void)
{
	return pcie_ats_disabled;
}
EXPORT_SYMBOL_GPL(pci_ats_disabled);

/* Disable bridge_d3 for all PCIe ports */
static bool pci_bridge_d3_disable;
/* Force bridge_d3 for all PCIe ports */
static bool pci_bridge_d3_force;

static int __init pcie_port_pm_setup(char *str)
{
	if (!strcmp(str, "off"))
		pci_bridge_d3_disable = true;
	else if (!strcmp(str, "force"))
		pci_bridge_d3_force = true;
	return 1;
}
__setup("pcie_port_pm=", pcie_port_pm_setup);

/**
 * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
 * @bus: pointer to PCI bus structure to search
 *
 * Given a PCI bus, returns the highest PCI bus number present in the set
 * including the given PCI bus and its list of child PCI buses.
 */
unsigned char pci_bus_max_busnr(struct pci_bus *bus)
{
	struct pci_bus *tmp;
	unsigned char max, n;

	max = bus->busn_res.end;
	list_for_each_entry(tmp, &bus->children, node) {
		n = pci_bus_max_busnr(tmp);
		if (n > max)
			max = n;
	}
	return max;
}
EXPORT_SYMBOL_GPL(pci_bus_max_busnr);
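
/*
 * Illustrative use (editor's sketch, not part of the original file): a
 * host bridge driver reporting the bus range it covers might do
 *
 *	unsigned char max = pci_bus_max_busnr(bus);
 *	pci_info(bridge, "buses %02x-%02x\n", bus->number, max);
 *
 * where "bridge" and "bus" stand in for the caller's own variables.
 */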

/**
 * pci_status_get_and_clear_errors - return and clear error bits in PCI_STATUS
 * @pdev: the PCI device
 *
 * Returns error bits set in PCI_STATUS and clears them.
 */
int pci_status_get_and_clear_errors(struct pci_dev *pdev)
{
	u16 status;
	int ret;

	ret = pci_read_config_word(pdev, PCI_STATUS, &status);
	if (ret != PCIBIOS_SUCCESSFUL)
		return -EIO;

	status &= PCI_STATUS_ERROR_BITS;
	if (status)
		pci_write_config_word(pdev, PCI_STATUS, status);

	return status;
}
EXPORT_SYMBOL_GPL(pci_status_get_and_clear_errors);
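
/*
 * Illustrative use (editor's sketch): callers typically test individual
 * PCI_STATUS error bits in the return value, e.g.
 *
 *	int status = pci_status_get_and_clear_errors(pdev);
 *	if (status < 0)
 *		return status;
 *	if (status & PCI_STATUS_SIG_TARGET_ABORT)
 *		pci_warn(pdev, "target abort signalled\n");
 */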

#ifdef CONFIG_HAS_IOMEM
static void __iomem *__pci_ioremap_resource(struct pci_dev *pdev, int bar,
					    bool write_combine)
{
	struct resource *res = &pdev->resource[bar];
	resource_size_t start = res->start;
	resource_size_t size = resource_size(res);

	/*
	 * Make sure the BAR is actually a memory resource, not an IO resource
	 */
	if (res->flags & IORESOURCE_UNSET || !(res->flags & IORESOURCE_MEM)) {
		pci_err(pdev, "can't ioremap BAR %d: %pR\n", bar, res);
		return NULL;
	}

	if (write_combine)
		return ioremap_wc(start, size);

	return ioremap(start, size);
}

void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
{
	return __pci_ioremap_resource(pdev, bar, false);
}
EXPORT_SYMBOL_GPL(pci_ioremap_bar);

void __iomem *pci_ioremap_wc_bar(struct pci_dev *pdev, int bar)
{
	return __pci_ioremap_resource(pdev, bar, true);
}
EXPORT_SYMBOL_GPL(pci_ioremap_wc_bar);
#endif
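
/*
 * Illustrative use (editor's sketch): a driver mapping BAR 0 in its
 * probe() path, under the assumption that BAR 0 is a memory BAR on the
 * device in question:
 *
 *	void __iomem *regs = pci_ioremap_bar(pdev, 0);
 *	if (!regs)
 *		return -ENOMEM;
 */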

/**
 * pci_dev_str_match_path - test if a path string matches a device
 * @dev: the PCI device to test
 * @path: string to match the device against
 * @endptr: pointer to the string after the match
 *
 * Test if a string (typically from a kernel parameter) formatted as a
 * path of device/function addresses matches a PCI device. The string must
 * be of the form:
 *
 *   [<domain>:]<bus>:<device>.<func>[/<device>.<func>]*
 *
 * A path for a device can be obtained using 'lspci -t'.  Using a path
 * is more robust against bus renumbering than using only a single bus,
 * device and function address.
 *
 * Returns 1 if the string matches the device, 0 if it does not and
 * a negative error code if it fails to parse the string.
 */
static int pci_dev_str_match_path(struct pci_dev *dev, const char *path,
				  const char **endptr)
{
	int ret;
	unsigned int seg, bus, slot, func;
	char *wpath, *p;
	char end;

	*endptr = strchrnul(path, ';');

	wpath = kmemdup_nul(path, *endptr - path, GFP_ATOMIC);
	if (!wpath)
		return -ENOMEM;

	while (1) {
		p = strrchr(wpath, '/');
		if (!p)
			break;
		ret = sscanf(p, "/%x.%x%c", &slot, &func, &end);
		if (ret != 2) {
			ret = -EINVAL;
			goto free_and_exit;
		}

		if (dev->devfn != PCI_DEVFN(slot, func)) {
			ret = 0;
			goto free_and_exit;
		}

		/*
		 * Note: we don't need to get a reference to the upstream
		 * bridge because we hold a reference to the top level
		 * device which should hold a reference to the bridge,
		 * and so on.
		 */
		dev = pci_upstream_bridge(dev);
		if (!dev) {
			ret = 0;
			goto free_and_exit;
		}

		*p = 0;
	}

	ret = sscanf(wpath, "%x:%x:%x.%x%c", &seg, &bus, &slot,
		     &func, &end);
	if (ret != 4) {
		seg = 0;
		ret = sscanf(wpath, "%x:%x.%x%c", &bus, &slot, &func, &end);
		if (ret != 3) {
			ret = -EINVAL;
			goto free_and_exit;
		}
	}

	ret = (seg == pci_domain_nr(dev->bus) &&
	       bus == dev->bus->number &&
	       dev->devfn == PCI_DEVFN(slot, func));

free_and_exit:
	kfree(wpath);
	return ret;
}

/**
 * pci_dev_str_match - test if a string matches a device
 * @dev: the PCI device to test
 * @p: string to match the device against
 * @endptr: pointer to the string after the match
 *
 * Test if a string (typically from a kernel parameter) matches a specified
 * PCI device. The string may be of one of the following formats:
 *
 *   [<domain>:]<bus>:<device>.<func>[/<device>.<func>]*
 *   pci:<vendor>:<device>[:<subvendor>:<subdevice>]
 *
 * The first format specifies a PCI bus/device/function address which
 * may change if new hardware is inserted, if motherboard firmware changes,
 * or due to changes caused by kernel parameters. If the domain is
 * left unspecified, it is taken to be 0.  In order to be robust against
 * bus renumbering issues, a path of PCI device/function numbers may be used
 * to address the specific device.  The path for a device can be determined
 * through the use of 'lspci -t'.
 *
 * The second format matches devices using IDs in the configuration
 * space which may match multiple devices in the system. A value of 0
 * for any field will match all devices. (Note: this differs from
 * in-kernel code that uses PCI_ANY_ID which is ~0; this is for
 * legacy reasons and convenience so users don't have to specify
 * FFFFFFFFs on the command line.)
 *
 * Returns 1 if the string matches the device, 0 if it does not and
 * a negative error code if the string cannot be parsed.
 */
static int pci_dev_str_match(struct pci_dev *dev, const char *p,
			     const char **endptr)
{
	int ret;
	int count;
	unsigned short vendor, device, subsystem_vendor, subsystem_device;

	if (strncmp(p, "pci:", 4) == 0) {
		/* PCI vendor/device (subvendor/subdevice) IDs are specified */
		p += 4;
		ret = sscanf(p, "%hx:%hx:%hx:%hx%n", &vendor, &device,
			     &subsystem_vendor, &subsystem_device, &count);
		if (ret != 4) {
			ret = sscanf(p, "%hx:%hx%n", &vendor, &device, &count);
			if (ret != 2)
				return -EINVAL;

			subsystem_vendor = 0;
			subsystem_device = 0;
		}

		p += count;

		if ((!vendor || vendor == dev->vendor) &&
		    (!device || device == dev->device) &&
		    (!subsystem_vendor ||
			    subsystem_vendor == dev->subsystem_vendor) &&
		    (!subsystem_device ||
			    subsystem_device == dev->subsystem_device))
			goto found;
	} else {
		/*
		 * PCI Bus, Device, Function IDs are specified
		 * (optionally, may include a path of devfns following it)
		 */
		ret = pci_dev_str_match_path(dev, p, &p);
		if (ret < 0)
			return ret;
		else if (ret)
			goto found;
	}

	*endptr = p;
	return 0;

found:
	*endptr = p;
	return 1;
}
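
/*
 * Editor's note with example strings (hypothetical addresses): the forms
 * above would match a device at domain 0, bus 3, device 5, function 0 as
 * "0000:03:05.0" or, by ID, as "pci:8086:1533" for vendor 0x8086, device
 * 0x1533 (any subvendor/subdevice).  A path form such as
 * "0000:00:1c.0/05.0" follows the device/function numbers down from the
 * root, which survives bus renumbering.
 */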

static u8 __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn,
			      u8 pos, int cap)
{
	return PCI_FIND_NEXT_CAP(pci_bus_read_config, pos, cap, NULL, bus, devfn);
}

u8 pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
{
	return __pci_find_next_cap(dev->bus, dev->devfn,
				   pos + PCI_CAP_LIST_NEXT, cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_capability);

static u8 __pci_bus_find_cap_start(struct pci_bus *bus,
				    unsigned int devfn, u8 hdr_type)
{
	u16 status;

	pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
	if (!(status & PCI_STATUS_CAP_LIST))
		return 0;

	switch (hdr_type) {
	case PCI_HEADER_TYPE_NORMAL:
	case PCI_HEADER_TYPE_BRIDGE:
		return PCI_CAPABILITY_LIST;
	case PCI_HEADER_TYPE_CARDBUS:
		return PCI_CB_CAPABILITY_LIST;
	}

	return 0;
}

/**
 * pci_find_capability - query for devices' capabilities
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Tell if a device supports a given PCI capability.
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.  Possible values for @cap include:
 *
 *  %PCI_CAP_ID_PM           Power Management
 *  %PCI_CAP_ID_AGP          Accelerated Graphics Port
 *  %PCI_CAP_ID_VPD          Vital Product Data
 *  %PCI_CAP_ID_SLOTID       Slot Identification
 *  %PCI_CAP_ID_MSI          Message Signalled Interrupts
 *  %PCI_CAP_ID_CHSWP        CompactPCI HotSwap
 *  %PCI_CAP_ID_PCIX         PCI-X
 *  %PCI_CAP_ID_EXP          PCI Express
 */
u8 pci_find_capability(struct pci_dev *dev, int cap)
{
	u8 pos;

	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
	if (pos)
		pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap);

	return pos;
}
EXPORT_SYMBOL(pci_find_capability);
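
/*
 * Illustrative use (editor's sketch): checking for the Power Management
 * capability before touching its registers:
 *
 *	u8 pm = pci_find_capability(pdev, PCI_CAP_ID_PM);
 *	if (pm)
 *		pci_read_config_word(pdev, pm + PCI_PM_CTRL, &pmcsr);
 *
 * "pdev" and "pmcsr" stand in for the caller's own variables.
 */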

/**
 * pci_bus_find_capability - query for devices' capabilities
 * @bus: the PCI bus to query
 * @devfn: PCI device to query
 * @cap: capability code
 *
 * Like pci_find_capability() but works for PCI devices that do not have a
 * pci_dev structure set up yet.
 *
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.
 */
u8 pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
{
	u8 hdr_type, pos;

	pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);

	pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & PCI_HEADER_TYPE_MASK);
	if (pos)
		pos = __pci_find_next_cap(bus, devfn, pos, cap);

	return pos;
}
EXPORT_SYMBOL(pci_bus_find_capability);

/**
 * pci_find_next_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @start: address at which to start looking (0 to start at beginning of list)
 * @cap: capability code
 *
 * Returns the address of the next matching extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it.  Some capabilities can occur several times, e.g., the
 * vendor-specific capability, and this provides a way to find them all.
 */
u16 pci_find_next_ext_capability(struct pci_dev *dev, u16 start, int cap)
{
	if (dev->cfg_size <= PCI_CFG_SPACE_SIZE)
		return 0;

	return PCI_FIND_NEXT_EXT_CAP(pci_bus_read_config, start, cap,
				     NULL, dev->bus, dev->devfn);
}
EXPORT_SYMBOL_GPL(pci_find_next_ext_capability);

/**
 * pci_find_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Returns the address of the requested extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it.  Possible values for @cap include:
 *
 *  %PCI_EXT_CAP_ID_ERR		Advanced Error Reporting
 *  %PCI_EXT_CAP_ID_VC		Virtual Channel
 *  %PCI_EXT_CAP_ID_DSN		Device Serial Number
 *  %PCI_EXT_CAP_ID_PWR		Power Budgeting
 */
u16 pci_find_ext_capability(struct pci_dev *dev, int cap)
{
	return pci_find_next_ext_capability(dev, 0, cap);
}
EXPORT_SYMBOL_GPL(pci_find_ext_capability);

/**
 * pci_get_dsn - Read and return the 8-byte Device Serial Number
 * @dev: PCI device to query
 *
 * Looks up the PCI_EXT_CAP_ID_DSN and reads the 8 bytes of the Device Serial
 * Number.
 *
 * Returns the DSN, or zero if the capability does not exist.
 */
u64 pci_get_dsn(struct pci_dev *dev)
{
	u32 dword;
	u64 dsn;
	int pos;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DSN);
	if (!pos)
		return 0;

	/*
	 * The Device Serial Number is two dwords offset 4 bytes from the
	 * capability position. The specification says that the first dword is
	 * the lower half, and the second dword is the upper half.
	 */
	pos += 4;
	pci_read_config_dword(dev, pos, &dword);
	dsn = (u64)dword;
	pci_read_config_dword(dev, pos + 4, &dword);
	dsn |= ((u64)dword) << 32;

	return dsn;
}
EXPORT_SYMBOL_GPL(pci_get_dsn);
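
/*
 * Illustrative use (editor's sketch): some NIC drivers expose the DSN as
 * a stable device identifier, along the lines of
 *
 *	u64 dsn = pci_get_dsn(pdev);
 *	if (dsn)
 *		pci_info(pdev, "serial number %016llx\n", dsn);
 */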

static u8 __pci_find_next_ht_cap(struct pci_dev *dev, u8 pos, int ht_cap)
{
	int rc;
	u8 cap, mask;

	if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST)
		mask = HT_3BIT_CAP_MASK;
	else
		mask = HT_5BIT_CAP_MASK;

	pos = PCI_FIND_NEXT_CAP(pci_bus_read_config, pos,
				PCI_CAP_ID_HT, NULL, dev->bus, dev->devfn);
	while (pos) {
		rc = pci_read_config_byte(dev, pos + 3, &cap);
		if (rc != PCIBIOS_SUCCESSFUL)
			return 0;

		if ((cap & mask) == ht_cap)
			return pos;

		pos = PCI_FIND_NEXT_CAP(pci_bus_read_config,
					pos + PCI_CAP_LIST_NEXT,
					PCI_CAP_ID_HT, NULL, dev->bus,
					dev->devfn);
	}

	return 0;
}

/**
 * pci_find_next_ht_capability - query a device's HyperTransport capabilities
 * @dev: PCI device to query
 * @pos: Position from which to continue searching
 * @ht_cap: HyperTransport capability code
 *
 * To be used in conjunction with pci_find_ht_capability() to search for
 * all capabilities matching @ht_cap. @pos should always be a value returned
 * from pci_find_ht_capability().
 *
 * NB. To be 100% safe against broken PCI devices, the caller should take
 * steps to avoid an infinite loop.
 */
u8 pci_find_next_ht_capability(struct pci_dev *dev, u8 pos, int ht_cap)
{
	return __pci_find_next_ht_cap(dev, pos + PCI_CAP_LIST_NEXT, ht_cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_ht_capability);

/**
 * pci_find_ht_capability - query a device's HyperTransport capabilities
 * @dev: PCI device to query
 * @ht_cap: HyperTransport capability code
 *
 * Tell if a device supports a given HyperTransport capability.
 * Returns an address within the device's PCI configuration space
 * or 0 in case the device does not support the requested capability.
 * The address points to the PCI capability, of type PCI_CAP_ID_HT,
 * which has a HyperTransport capability matching @ht_cap.
 */
u8 pci_find_ht_capability(struct pci_dev *dev, int ht_cap)
{
	u8 pos;

	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
	if (pos)
		pos = __pci_find_next_ht_cap(dev, pos, ht_cap);

	return pos;
}
EXPORT_SYMBOL_GPL(pci_find_ht_capability);
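
/*
 * Illustrative use (editor's sketch): walking all matching HT
 * capabilities with a loop bound, as the NB above recommends.  The cap
 * of 48 iterations is the editor's worst-case estimate for 4-byte-spaced
 * capability entries in 256 bytes of config space:
 *
 *	int guard = 48;
 *	u8 pos = pci_find_ht_capability(pdev, HT_CAPTYPE_MSI_MAPPING);
 *	while (pos && guard--)
 *		pos = pci_find_next_ht_capability(pdev, pos,
 *						  HT_CAPTYPE_MSI_MAPPING);
 */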

/**
 * pci_find_vsec_capability - Find a vendor-specific extended capability
 * @dev: PCI device to query
 * @vendor: Vendor ID for which capability is defined
 * @cap: Vendor-specific capability ID
 *
 * If @dev has Vendor ID @vendor, search for a VSEC capability with
 * VSEC ID @cap. If found, return the capability offset in
 * config space; otherwise return 0.
 */
u16 pci_find_vsec_capability(struct pci_dev *dev, u16 vendor, int cap)
{
	u16 vsec = 0;
	u32 header;
	int ret;

	if (vendor != dev->vendor)
		return 0;

	while ((vsec = pci_find_next_ext_capability(dev, vsec,
						     PCI_EXT_CAP_ID_VNDR))) {
		ret = pci_read_config_dword(dev, vsec + PCI_VNDR_HEADER, &header);
		if (ret != PCIBIOS_SUCCESSFUL)
			continue;

		if (PCI_VNDR_HEADER_ID(header) == cap)
			return vsec;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(pci_find_vsec_capability);

/**
 * pci_find_dvsec_capability - Find DVSEC for vendor
 * @dev: PCI device to query
 * @vendor: Vendor ID to match for the DVSEC
 * @dvsec: Designated Vendor-specific capability ID
 *
 * If DVSEC has Vendor ID @vendor and DVSEC ID @dvsec return the capability
 * offset in config space; otherwise return 0.
 */
u16 pci_find_dvsec_capability(struct pci_dev *dev, u16 vendor, u16 dvsec)
{
	int pos;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DVSEC);
	if (!pos)
		return 0;

	while (pos) {
		u16 v, id;

		pci_read_config_word(dev, pos + PCI_DVSEC_HEADER1, &v);
		pci_read_config_word(dev, pos + PCI_DVSEC_HEADER2, &id);
		if (vendor == v && dvsec == id)
			return pos;

		pos = pci_find_next_ext_capability(dev, pos, PCI_EXT_CAP_ID_DVSEC);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(pci_find_dvsec_capability);
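
/*
 * Illustrative use (editor's sketch): subsystems such as CXL locate
 * vendor-defined register blocks via DVSEC; a caller might do
 *
 *	u16 pos = pci_find_dvsec_capability(pdev, PCI_VENDOR_ID_INTEL, 5);
 *	if (!pos)
 *		return -ENODEV;
 *
 * where the DVSEC ID 5 is a made-up example value, not a real
 * assignment.
 */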

/**
 * pci_find_parent_resource - return resource region of parent bus of given
 *			      region
 * @dev: PCI device structure containing resources to be searched
 * @res: child resource record for which parent is sought
 *
 * For given resource region of given device, return the resource region of
 * parent bus the given region is contained in.
 */
struct resource *pci_find_parent_resource(const struct pci_dev *dev,
					  struct resource *res)
{
	const struct pci_bus *bus = dev->bus;
	struct resource *r;

	pci_bus_for_each_resource(bus, r) {
		if (!r)
			continue;
		if (resource_contains(r, res)) {

			/*
			 * If the window is prefetchable but the BAR is
			 * not, the allocator made a mistake.
			 */
			if (r->flags & IORESOURCE_PREFETCH &&
			    !(res->flags & IORESOURCE_PREFETCH))
				return NULL;

			/*
			 * If we're below a transparent bridge, there may
			 * be both a positively-decoded aperture and a
			 * subtractively-decoded region that contain the BAR.
			 * We want the positively-decoded one, so this depends
			 * on pci_bus_for_each_resource() giving us those
			 * first.
			 */
			return r;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(pci_find_parent_resource);

/**
 * pci_find_resource - Return matching PCI device resource
 * @dev: PCI device to query
 * @res: Resource to look for
 *
 * Goes over standard PCI resources (BARs) and checks if the given resource
 * is partially or fully contained in any of them. In that case the
 * matching resource is returned, %NULL otherwise.
 */
struct resource *pci_find_resource(struct pci_dev *dev, struct resource *res)
{
	int i;

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		struct resource *r = &dev->resource[i];

		if (r->start && resource_contains(r, res))
			return r;
	}

	return NULL;
}
EXPORT_SYMBOL(pci_find_resource);

/**
 * pci_resource_name - Return the name of the PCI resource
 * @dev: PCI device to query
 * @i: index of the resource
 *
 * Return the standard PCI resource (BAR) name according to its index.
 */
const char *pci_resource_name(struct pci_dev *dev, unsigned int i)
{
	static const char * const bar_name[] = {
		"BAR 0",
		"BAR 1",
		"BAR 2",
		"BAR 3",
		"BAR 4",
		"BAR 5",
		"ROM",
#ifdef CONFIG_PCI_IOV
		"VF BAR 0",
		"VF BAR 1",
		"VF BAR 2",
		"VF BAR 3",
		"VF BAR 4",
		"VF BAR 5",
#endif
		"bridge window",	/* "io" included in %pR */
		"bridge window",	/* "mem" included in %pR */
		"bridge window",	/* "mem pref" included in %pR */
	};
	static const char * const cardbus_name[] = {
		"BAR 1",
		"unknown",
		"unknown",
		"unknown",
		"unknown",
		"unknown",
#ifdef CONFIG_PCI_IOV
		"unknown",
		"unknown",
		"unknown",
		"unknown",
		"unknown",
		"unknown",
#endif
		"CardBus bridge window 0",	/* I/O */
		"CardBus bridge window 1",	/* I/O */
		"CardBus bridge window 0",	/* mem */
		"CardBus bridge window 1",	/* mem */
	};

	if (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS &&
	    i < ARRAY_SIZE(cardbus_name))
		return cardbus_name[i];

	if (i < ARRAY_SIZE(bar_name))
		return bar_name[i];

	return "unknown";
}

/**
 * pci_wait_for_pending - wait for @mask bit(s) to clear in status word @pos
 * @dev: the PCI device to operate on
 * @pos: config space offset of status word
 * @mask: mask of bit(s) to care about in status word
 *
 * Return 1 when mask bit(s) in status word clear, 0 otherwise.
 */
int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask)
{
	int i;

	/* Wait for Transaction Pending bit to clear */
	for (i = 0; i < 4; i++) {
		u16 status;
		if (i)
			msleep((1 << (i - 1)) * 100);

		pci_read_config_word(dev, pos, &status);
		if (!(status & mask))
			return 1;
	}

	return 0;
}
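
/*
 * Editor's note: the loop above polls with exponential backoff (100,
 * 200, then 400 ms between reads), so a device that never clears the
 * masked bits delays the caller by roughly 700 ms in total before 0 is
 * returned.
 */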

static int pci_acs_enable;

/**
 * pci_request_acs - ask for ACS to be enabled if supported
 */
void pci_request_acs(void)
{
	pci_acs_enable = 1;
}

static const char *disable_acs_redir_param;
static const char *config_acs_param;

struct pci_acs {
	u16 ctrl;
	u16 fw_ctrl;
};

static void __pci_config_acs(struct pci_dev *dev, struct pci_acs *caps,
			     const char *p, const u16 acs_mask, const u16 acs_flags)
{
	u16 flags = acs_flags;
	u16 mask = acs_mask;
	char *delimit;
	int ret = 0;

	if (!p)
		return;

	while (*p) {
		if (!acs_mask) {
			/* Check for ACS flags */
			delimit = strstr(p, "@");
			if (delimit) {
				int end;
				u32 shift = 0;

				end = delimit - p - 1;
				mask = 0;
				flags = 0;

				while (end > -1) {
					if (*(p + end) == '0') {
						mask |= 1 << shift;
						shift++;
						end--;
					} else if (*(p + end) == '1') {
						mask |= 1 << shift;
						flags |= 1 << shift;
						shift++;
						end--;
					} else if ((*(p + end) == 'x') || (*(p + end) == 'X')) {
						shift++;
						end--;
					} else {
						pci_err(dev, "Invalid ACS flags... Ignoring\n");
						return;
					}
				}
				p = delimit + 1;
			} else {
				pci_err(dev, "ACS Flags missing\n");
				return;
			}
		}

		if (mask & ~(PCI_ACS_SV | PCI_ACS_TB | PCI_ACS_RR | PCI_ACS_CR |
			    PCI_ACS_UF | PCI_ACS_EC | PCI_ACS_DT)) {
			pci_err(dev, "Invalid ACS flags specified\n");
			return;
		}

		ret = pci_dev_str_match(dev, p, &p);
		if (ret < 0) {
			pr_info_once("PCI: Can't parse ACS command line parameter\n");
			break;
		} else if (ret == 1) {
			/* Found a match */
			break;
		}

		if (*p != ';' && *p != ',') {
			/* End of param or invalid format */
			break;
		}
		p++;
	}

	if (ret != 1)
		return;

	if (!pci_dev_specific_disable_acs_redir(dev))
		return;

	pci_dbg(dev, "ACS mask  = %#06x\n", mask);
	pci_dbg(dev, "ACS flags = %#06x\n", flags);
	pci_dbg(dev, "ACS control = %#06x\n", caps->ctrl);
	pci_dbg(dev, "ACS fw_ctrl = %#06x\n", caps->fw_ctrl);

	/*
	 * For mask bits that are 0, copy them from the firmware setting
	 * and apply flags for all the mask bits that are 1.
	 */
	caps->ctrl = (caps->fw_ctrl & ~mask) | (flags & mask);

	pci_info(dev, "Configured ACS to %#06x\n", caps->ctrl);
}
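
/*
 * Editor's note with a hypothetical example: for the "pci=config_acs="
 * parameter parsed above, a string such as
 *
 *	pci=config_acs=10x@0000:00:1c.0
 *
 * would, for the device at 0000:00:1c.0, set the highest of the three
 * specified ACS control bits, clear the middle one, and leave the
 * lowest ('x') at its firmware default.  Flag characters are consumed
 * from the right, so the rightmost character controls bit 0.
 */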

/**
 * pci_std_enable_acs - enable ACS on devices using standard ACS capabilities
 * @dev: the PCI device
 * @caps: default ACS controls
 */
static void pci_std_enable_acs(struct pci_dev *dev, struct pci_acs *caps)
{
	/* Source Validation */
	caps->ctrl |= (dev->acs_capabilities & PCI_ACS_SV);

	/* P2P Request Redirect */
	caps->ctrl |= (dev->acs_capabilities & PCI_ACS_RR);

	/* P2P Completion Redirect */
	caps->ctrl |= (dev->acs_capabilities & PCI_ACS_CR);

	/* Upstream Forwarding */
	caps->ctrl |= (dev->acs_capabilities & PCI_ACS_UF);

	/* Enable Translation Blocking for external devices and noats */
	if (pci_ats_disabled() || dev->external_facing || dev->untrusted)
		caps->ctrl |= (dev->acs_capabilities & PCI_ACS_TB);
}

/**
 * pci_enable_acs - enable ACS if hardware supports it
 * @dev: the PCI device
 */
void pci_enable_acs(struct pci_dev *dev)
{
	struct pci_acs caps;
	bool enable_acs = false;
	int pos;

	/* If an iommu is present we start with kernel default caps */
	if (pci_acs_enable) {
		if (pci_dev_specific_enable_acs(dev))
			enable_acs = true;
	}

	pos = dev->acs_cap;
	if (!pos)
		return;

	pci_read_config_word(dev, pos + PCI_ACS_CTRL, &caps.ctrl);
	caps.fw_ctrl = caps.ctrl;

	if (enable_acs)
		pci_std_enable_acs(dev, &caps);

	/*
	 * Always apply caps from the command line, even if there is no iommu.
	 * Trust that the admin has a reason to change the ACS settings.
	 */
	__pci_config_acs(dev, &caps, disable_acs_redir_param,
			 PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC,
			 ~(PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC));
	__pci_config_acs(dev, &caps, config_acs_param, 0, 0);

	pci_write_config_word(dev, pos + PCI_ACS_CTRL, caps.ctrl);
}

/**
 * pci_restore_bars - restore a device's BAR values (e.g. after wake-up)
 * @dev: PCI device to have its BARs restored
 *
 * Restore the BAR values for a given device, so as to make it
 * accessible by its driver.
 */
static void pci_restore_bars(struct pci_dev *dev)
{
	int i;

	for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
		pci_update_resource(dev, i);
}

static inline bool platform_pci_power_manageable(struct pci_dev *dev)
{
	if (pci_use_mid_pm())
		return true;

	return acpi_pci_power_manageable(dev);
}

static inline int platform_pci_set_power_state(struct pci_dev *dev,
					       pci_power_t t)
{
	if (pci_use_mid_pm())
		return mid_pci_set_power_state(dev, t);

	return acpi_pci_set_power_state(dev, t);
}

static inline pci_power_t platform_pci_get_power_state(struct pci_dev *dev)
{
	if (pci_use_mid_pm())
		return mid_pci_get_power_state(dev);

	return acpi_pci_get_power_state(dev);
}

static inline void platform_pci_refresh_power_state(struct pci_dev *dev)
{
	if (!pci_use_mid_pm())
		acpi_pci_refresh_power_state(dev);
}

static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev)
{
	if (pci_use_mid_pm())
		return PCI_POWER_ERROR;

	return acpi_pci_choose_state(dev);
}

static inline int platform_pci_set_wakeup(struct pci_dev *dev, bool enable)
{
	if (pci_use_mid_pm())
		return PCI_POWER_ERROR;

	return acpi_pci_wakeup(dev, enable);
}

static inline bool platform_pci_need_resume(struct pci_dev *dev)
{
	if (pci_use_mid_pm())
		return false;

	return acpi_pci_need_resume(dev);
}

static inline bool platform_pci_bridge_d3(struct pci_dev *dev)
{
	if (pci_use_mid_pm())
		return false;

	return acpi_pci_bridge_d3(dev);
}

/**
 * pci_update_current_state - Read power state of given device and cache it
 * @dev: PCI device to handle.
 * @state: State to cache in case the device doesn't have the PM capability
 *
 * The power state is read from the PMCSR register, which however is
 * inaccessible in D3cold.  The platform firmware is therefore queried first
 * to detect accessibility of the register.  In case the platform firmware
 * reports an incorrect state or the device isn't power manageable by the
 * platform at all, we try to detect D3cold by testing accessibility of the
 * vendor ID in config space.
 */
void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
{
	if (platform_pci_get_power_state(dev) == PCI_D3cold) {
		dev->current_state = PCI_D3cold;
	} else if (dev->pm_cap) {
		u16 pmcsr;

		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
		if (PCI_POSSIBLE_ERROR(pmcsr)) {
			dev->current_state = PCI_D3cold;
			return;
		}
		dev->current_state = pmcsr & PCI_PM_CTRL_STATE_MASK;
	} else {
		dev->current_state = state;
	}
}

/**
 * pci_refresh_power_state - Refresh the given device's power state data
 * @dev: Target PCI device.
 *
 * Ask the platform to refresh the device's power state information and invoke
 * pci_update_current_state() to update its current PCI power state.
 */
void pci_refresh_power_state(struct pci_dev *dev)
{
	platform_pci_refresh_power_state(dev);
	pci_update_current_state(dev, dev->current_state);
}

/**
 * pci_platform_power_transition - Use platform to change device power state
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 */
int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
{
	int error;

	error = platform_pci_set_power_state(dev, state);
	if (!error)
		pci_update_current_state(dev, state);
	else if (!dev->pm_cap) /* Fall back to PCI_D0 */
		dev->current_state = PCI_D0;

	return error;
}
EXPORT_SYMBOL_GPL(pci_platform_power_transition);

static int pci_resume_one(struct pci_dev *pci_dev, void *ign)
{
	pm_request_resume(&pci_dev->dev);
	return 0;
}

/**
 * pci_resume_bus - Walk given bus and runtime resume devices on it
 * @bus: Top bus of the subtree to walk.
 */
void pci_resume_bus(struct pci_bus *bus)
{
	if (bus)
		pci_walk_bus(bus, pci_resume_one, NULL);
}

static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout)
{
	int delay = 1;
	bool retrain = false;
	struct pci_dev *root, *bridge;

	root = pcie_find_root_port(dev);

	if (pci_is_pcie(dev)) {
		bridge = pci_upstream_bridge(dev);
		if (bridge)
			retrain = true;
	}

	/*
	 * The caller has already waited long enough after a reset that the
	 * device should respond to config requests, but it may respond
	 * with Request Retry Status (RRS) if it needs more time to
	 * initialize.
	 *
	 * If the device is below a Root Port with Configuration RRS
	 * Software Visibility enabled, reading the Vendor ID returns a
	 * special data value if the device responded with RRS.  Read the
	 * Vendor ID until we get non-RRS status.
	 *
	 * If there's no Root Port or Configuration RRS Software Visibility
	 * is not enabled, the device may still respond with RRS, but
	 * hardware may retry the config request.  If no retries receive
	 * Successful Completion, hardware generally synthesizes ~0
	 * (PCI_ERROR_RESPONSE) data to complete the read.  Reading Vendor
	 * ID for VFs and non-existent devices also returns ~0, so read the
	 * Command register until it returns something other than ~0.
	 */
	for (;;) {
		u32 id;

		if (pci_dev_is_disconnected(dev)) {
			pci_dbg(dev, "disconnected; not waiting\n");
			return -ENOTTY;
		}

		if (root && root->config_rrs_sv) {
			pci_read_config_dword(dev, PCI_VENDOR_ID, &id);
			if (!pci_bus_rrs_vendor_id(id))
				break;
		} else {
			pci_read_config_dword(dev, PCI_COMMAND, &id);
			if (!PCI_POSSIBLE_ERROR(id))
				break;
		}

		if (delay > timeout) {
			pci_warn(dev, "not ready %dms after %s; giving up\n",
				 delay - 1, reset_type);
			return -ENOTTY;
		}

		if (delay > PCI_RESET_WAIT) {
			if (retrain) {
				retrain = false;
				if (pcie_failed_link_retrain(bridge) == 0) {
					delay = 1;
					continue;
				}
			}
			pci_info(dev, "not ready %dms after %s; waiting\n",
				 delay - 1, reset_type);
		}

		msleep(delay);
		delay *= 2;
	}

	if (delay > PCI_RESET_WAIT)
		pci_info(dev, "ready %dms after %s\n", delay - 1,
			 reset_type);
	else
		pci_dbg(dev, "ready %dms after %s\n", delay - 1,
			reset_type);

	return 0;
}

/**
 * pci_power_up - Put the given device into D0
 * @dev: PCI device to power up
 *
 * On success, return 0 or 1, depending on whether or not it is necessary to
 * restore the device's BARs subsequently (1 is returned in that case).
 *
 * On failure, return a negative error code.  Always return failure if @dev
 * lacks a Power Management Capability, even if the platform was able to
 * put the device in D0 via non-PCI means.
 */
int pci_power_up(struct pci_dev *dev)
{
	bool need_restore;
	pci_power_t state;
	u16 pmcsr;

	platform_pci_set_power_state(dev, PCI_D0);

	if (!dev->pm_cap) {
		state = platform_pci_get_power_state(dev);
		if (state == PCI_UNKNOWN)
			dev->current_state = PCI_D0;
		else
			dev->current_state = state;

		return -EIO;
	}

	if (pci_dev_is_disconnected(dev)) {
		dev->current_state = PCI_D3cold;
		return -EIO;
	}

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	if (PCI_POSSIBLE_ERROR(pmcsr)) {
		pci_err(dev, "Unable to change power state from %s to D0, device inaccessible\n",
			pci_power_name(dev->current_state));
		dev->current_state = PCI_D3cold;
		return -EIO;
	}

	state = pmcsr & PCI_PM_CTRL_STATE_MASK;

	need_restore = (state == PCI_D3hot || dev->current_state >= PCI_D3hot) &&
			!(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET);

	if (state == PCI_D0)
		goto end;

	/*
	 * Force the entire word to 0. This doesn't affect PME_Status, disables
	 * PME_En, and sets PowerState to 0.
	 */
	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, 0);

	/* Mandatory transition delays; see PCI PM 1.2. */
	if (state == PCI_D3hot)
		pci_dev_d3_sleep(dev);
	else if (state == PCI_D2)
		udelay(PCI_PM_D2_DELAY);

end:
	dev->current_state = PCI_D0;
	if (need_restore)
		return 1;

	return 0;
}

/**
 * pci_set_full_power_state - Put a PCI device into D0 and update its state
 * @dev: PCI device to power up
 * @locked: whether pci_bus_sem is held
 *
 * Call pci_power_up() to put @dev into D0, read from its PCI_PM_CTRL register
 * to confirm the state change, restore its BARs if they might be lost and
 * reconfigure ASPM in accordance with the new power state.
 *
 * If pci_restore_state() is going to be called right after a power state change
 * to D0, it is more efficient to use pci_power_up() directly instead of this
 * function.
 */
static int pci_set_full_power_state(struct pci_dev *dev, bool locked)
{
	u16 pmcsr;
	int ret;

	ret = pci_power_up(dev);
	if (ret < 0) {
		if (dev->current_state == PCI_D0)
			return 0;

		return ret;
	}

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	dev->current_state = pmcsr & PCI_PM_CTRL_STATE_MASK;
	if (dev->current_state != PCI_D0) {
		pci_info_ratelimited(dev, "Refused to change power state from %s to D0\n",
				     pci_power_name(dev->current_state));
	} else if (ret > 0) {
		/*
		 * According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
		 * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
		 * from D3hot to D0 _may_ perform an internal reset, thereby
		 * going to "D0 Uninitialized" rather than "D0 Initialized".
		 * For example, at least some versions of the 3c905B and the
		 * 3c556B exhibit this behaviour.
		 *
		 * At least some laptop BIOSen (e.g. the Thinkpad T21) leave
		 * devices in a D3hot state at boot.  Consequently, we need to
		 * restore at least the BARs so that the device will be
		 * accessible to its driver.
		 */
		pci_restore_bars(dev);
	}

	if (dev->bus->self)
		pcie_aspm_pm_state_change(dev->bus->self, locked);

	return 0;
}

/**
 * __pci_dev_set_current_state - Set current state of a PCI device
 * @dev: Device to handle
 * @data: pointer to state to be set
 */
static int __pci_dev_set_current_state(struct pci_dev *dev, void *data)
{
	pci_power_t state = *(pci_power_t *)data;

	dev->current_state = state;
	return 0;
}

/**
 * pci_bus_set_current_state - Walk given bus and set current state of devices
 * @bus: Top bus of the subtree to walk.
 * @state: state to be set
 */
void pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state)
{
	if (bus)
		pci_walk_bus(bus, __pci_dev_set_current_state, &state);
}

static void __pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state, bool locked)
{
	if (!bus)
		return;

	if (locked)
		pci_walk_bus_locked(bus, __pci_dev_set_current_state, &state);
	else
		pci_walk_bus(bus, __pci_dev_set_current_state, &state);
}

/**
 * pci_set_low_power_state - Put a PCI device into a low-power state.
 * @dev: PCI device to handle.
 * @state: PCI power state (D1, D2, D3hot) to put the device into.
 * @locked: whether pci_bus_sem is held
 *
 * Use the device's PCI_PM_CTRL register to put it into a low-power state.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if device already is in the requested state.
 * 0 if device's power state has been successfully changed.
 */
static int pci_set_low_power_state(struct pci_dev *dev, pci_power_t state, bool locked)
{
	u16 pmcsr;

	if (!dev->pm_cap)
		return -EIO;

	/*
	 * Validate transition: We can enter D0 from any state, but if
	 * we're already in a low-power state, we can only go deeper.  E.g.,
	 * we can go from D1 to D3, but we can't go directly from D3 to D1;
	 * we'd have to go from D3 to D0, then to D1.
	 */
	if (dev->current_state <= PCI_D3cold && dev->current_state > state) {
		pci_dbg(dev, "Invalid power transition (from %s to %s)\n",
			pci_power_name(dev->current_state),
			pci_power_name(state));
		return -EINVAL;
	}

	/* Check if this device supports the desired state */
	if ((state == PCI_D1 && !dev->d1_support)
	   || (state == PCI_D2 && !dev->d2_support))
		return -EIO;

	if (dev->current_state == state)
		return 0;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	if (PCI_POSSIBLE_ERROR(pmcsr)) {
		pci_err(dev, "Unable to change power state from %s to %s, device inaccessible\n",
			pci_power_name(dev->current_state),
			pci_power_name(state));
		dev->current_state = PCI_D3cold;
		return -EIO;
	}

	pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
	pmcsr |= state;

	/* Enter specified state */
	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);

	/* Mandatory power management transition delays; see PCI PM 1.2. */
	if (state == PCI_D3hot)
		pci_dev_d3_sleep(dev);
	else if (state == PCI_D2)
		udelay(PCI_PM_D2_DELAY);

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	dev->current_state = pmcsr & PCI_PM_CTRL_STATE_MASK;
	if (dev->current_state != state)
		pci_info_ratelimited(dev, "Refused to change power state from %s to %s\n",
				     pci_power_name(dev->current_state),
				     pci_power_name(state));

	if (dev->bus->self)
		pcie_aspm_pm_state_change(dev->bus->self, locked);

	return 0;
}

static int __pci_set_power_state(struct pci_dev *dev, pci_power_t state, bool locked)
{
	int error;

	/* Bound the state we're entering */
	if (state > PCI_D3cold)
		state = PCI_D3cold;
	else if (state < PCI_D0)
		state = PCI_D0;
	else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))

		/*
		 * If the device or the parent bridge do not support PCI
		 * PM, ignore the request if we're doing anything other
		 * than putting it into D0 (which would only happen on
		 * boot).
		 */
		return 0;

	/* Check if we're already there */
	if (dev->current_state == state)
		return 0;

	if (state == PCI_D0)
		return pci_set_full_power_state(dev, locked);

	/*
	 * This device is quirked not to be put into D3, so don't put it in
	 * D3
	 */
	if (state >= PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
		return 0;

	if (state == PCI_D3cold) {
		/*
		 * To put the device in D3cold, put it into D3hot in the native
		 * way, then put it into D3cold using platform ops.
		 */
		error = pci_set_low_power_state(dev, PCI_D3hot, locked);

		if (pci_platform_power_transition(dev, PCI_D3cold))
			return error;

		/* Powering off a bridge may power off the whole hierarchy */
		if (dev->current_state == PCI_D3cold)
			__pci_bus_set_current_state(dev->subordinate, PCI_D3cold, locked);
	} else {
		error = pci_set_low_power_state(dev, state, locked);

		if (pci_platform_power_transition(dev, state))
			return error;
	}

	return 0;
}

/**
 * pci_set_power_state - Set the power state of a PCI device
 * @dev: PCI device to handle.
 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 *
 * Transition a device to a new power state, using the platform firmware and/or
 * the device's PCI PM registers.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if the transition is to D1 or D2 but D1 and D2 are not supported.
 * 0 if device already is in the requested state.
 * 0 if the transition is to D3 but D3 is not supported.
 * 0 if device's power state has been successfully changed.
 */
int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	return __pci_set_power_state(dev, state, false);
}
EXPORT_SYMBOL(pci_set_power_state);
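
/*
 * Illustrative use (editor's sketch): a driver's legacy suspend path
 * commonly pairs this with state saving, e.g.
 *
 *	pci_save_state(pdev);
 *	pci_set_power_state(pdev, PCI_D3hot);
 *
 * and the resume path reverses it with pci_set_power_state(pdev, PCI_D0)
 * followed by pci_restore_state(pdev).
 */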

int pci_set_power_state_locked(struct pci_dev *dev, pci_power_t state)
{
	lockdep_assert_held(&pci_bus_sem);

	return __pci_set_power_state(dev, state, true);
}
EXPORT_SYMBOL(pci_set_power_state_locked);
1609 
1610 #define PCI_EXP_SAVE_REGS	7
1611 
_pci_find_saved_cap(struct pci_dev * pci_dev,u16 cap,bool extended)1612 static struct pci_cap_saved_state *_pci_find_saved_cap(struct pci_dev *pci_dev,
1613 						       u16 cap, bool extended)
1614 {
1615 	struct pci_cap_saved_state *tmp;
1616 
1617 	hlist_for_each_entry(tmp, &pci_dev->saved_cap_space, next) {
1618 		if (tmp->cap.cap_extended == extended && tmp->cap.cap_nr == cap)
1619 			return tmp;
1620 	}
1621 	return NULL;
1622 }
1623 
pci_find_saved_cap(struct pci_dev * dev,char cap)1624 struct pci_cap_saved_state *pci_find_saved_cap(struct pci_dev *dev, char cap)
1625 {
1626 	return _pci_find_saved_cap(dev, cap, false);
1627 }
1628 
pci_find_saved_ext_cap(struct pci_dev * dev,u16 cap)1629 struct pci_cap_saved_state *pci_find_saved_ext_cap(struct pci_dev *dev, u16 cap)
1630 {
1631 	return _pci_find_saved_cap(dev, cap, true);
1632 }
1633 
pci_save_pcie_state(struct pci_dev * dev)1634 static int pci_save_pcie_state(struct pci_dev *dev)
1635 {
1636 	int i = 0;
1637 	struct pci_cap_saved_state *save_state;
1638 	u16 *cap;
1639 
1640 	if (!pci_is_pcie(dev))
1641 		return 0;
1642 
1643 	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
1644 	if (!save_state) {
1645 		pci_err(dev, "buffer not found in %s\n", __func__);
1646 		return -ENOMEM;
1647 	}
1648 
1649 	cap = (u16 *)&save_state->cap.data[0];
1650 	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &cap[i++]);
1651 	pcie_capability_read_word(dev, PCI_EXP_LNKCTL, &cap[i++]);
1652 	pcie_capability_read_word(dev, PCI_EXP_SLTCTL, &cap[i++]);
1653 	pcie_capability_read_word(dev, PCI_EXP_RTCTL,  &cap[i++]);
1654 	pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &cap[i++]);
1655 	pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &cap[i++]);
1656 	pcie_capability_read_word(dev, PCI_EXP_SLTCTL2, &cap[i++]);
1657 
1658 	pci_save_aspm_l1ss_state(dev);
1659 	pci_save_ltr_state(dev);
1660 
1661 	return 0;
1662 }
1663 
pci_restore_pcie_state(struct pci_dev * dev)1664 static void pci_restore_pcie_state(struct pci_dev *dev)
1665 {
1666 	int i = 0;
1667 	struct pci_cap_saved_state *save_state;
1668 	u16 *cap;
1669 
1670 	/*
1671 	 * Restore max latencies (in the LTR capability) before enabling
1672 	 * LTR itself in PCI_EXP_DEVCTL2.
1673 	 */
1674 	pci_restore_ltr_state(dev);
1675 	pci_restore_aspm_l1ss_state(dev);
1676 
1677 	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
1678 	if (!save_state)
1679 		return;
1680 
1681 	/*
1682 	 * Downstream ports reset the LTR enable bit when link goes down.
1683 	 * Check and re-configure the bit here before restoring device.
1684 	 * PCIe r5.0, sec 7.5.3.16.
1685 	 */
1686 	pci_bridge_reconfigure_ltr(dev);
1687 
1688 	cap = (u16 *)&save_state->cap.data[0];
1689 	pcie_capability_write_word(dev, PCI_EXP_DEVCTL, cap[i++]);
1690 	pcie_capability_write_word(dev, PCI_EXP_LNKCTL, cap[i++]);
1691 	pcie_capability_write_word(dev, PCI_EXP_SLTCTL, cap[i++]);
1692 	pcie_capability_write_word(dev, PCI_EXP_RTCTL, cap[i++]);
1693 	pcie_capability_write_word(dev, PCI_EXP_DEVCTL2, cap[i++]);
1694 	pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, cap[i++]);
1695 	pcie_capability_write_word(dev, PCI_EXP_SLTCTL2, cap[i++]);
1696 }
1697 
pci_save_pcix_state(struct pci_dev * dev)1698 static int pci_save_pcix_state(struct pci_dev *dev)
1699 {
1700 	int pos;
1701 	struct pci_cap_saved_state *save_state;
1702 
1703 	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
1704 	if (!pos)
1705 		return 0;
1706 
1707 	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
1708 	if (!save_state) {
1709 		pci_err(dev, "buffer not found in %s\n", __func__);
1710 		return -ENOMEM;
1711 	}
1712 
1713 	pci_read_config_word(dev, pos + PCI_X_CMD,
1714 			     (u16 *)save_state->cap.data);
1715 
1716 	return 0;
1717 }
1718 
1719 static void pci_restore_pcix_state(struct pci_dev *dev)
1720 {
1721 	int i = 0, pos;
1722 	struct pci_cap_saved_state *save_state;
1723 	u16 *cap;
1724 
1725 	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
1726 	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
1727 	if (!save_state || !pos)
1728 		return;
1729 	cap = (u16 *)&save_state->cap.data[0];
1730 
1731 	pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
1732 }
1733 
1734 /**
1735  * pci_save_state - save the PCI configuration space of a device before
1736  *		    suspending
1737  * @dev: PCI device that we're dealing with
1738  */
1739 int pci_save_state(struct pci_dev *dev)
1740 {
1741 	int i;
1742 	/* XXX: 100% dword access ok here? */
1743 	for (i = 0; i < 16; i++) {
1744 		pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
1745 		pci_dbg(dev, "save config %#04x: %#010x\n",
1746 			i * 4, dev->saved_config_space[i]);
1747 	}
1748 	dev->state_saved = true;
1749 
1750 	i = pci_save_pcie_state(dev);
1751 	if (i != 0)
1752 		return i;
1753 
1754 	i = pci_save_pcix_state(dev);
1755 	if (i != 0)
1756 		return i;
1757 
1758 	pci_save_dpc_state(dev);
1759 	pci_save_aer_state(dev);
1760 	pci_save_ptm_state(dev);
1761 	pci_save_tph_state(dev);
1762 	return pci_save_vc_state(dev);
1763 }
1764 EXPORT_SYMBOL(pci_save_state);
1765 
1766 static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
1767 				     u32 saved_val, int retry, bool force)
1768 {
1769 	u32 val;
1770 
1771 	pci_read_config_dword(pdev, offset, &val);
1772 	if (!force && val == saved_val)
1773 		return;
1774 
1775 	for (;;) {
1776 		pci_dbg(pdev, "restore config %#04x: %#010x -> %#010x\n",
1777 			offset, val, saved_val);
1778 		pci_write_config_dword(pdev, offset, saved_val);
1779 		if (retry-- <= 0)
1780 			return;
1781 
1782 		pci_read_config_dword(pdev, offset, &val);
1783 		if (val == saved_val)
1784 			return;
1785 
1786 		mdelay(1);
1787 	}
1788 }
1789 
1790 static void pci_restore_config_space_range(struct pci_dev *pdev,
1791 					   int start, int end, int retry,
1792 					   bool force)
1793 {
1794 	int index;
1795 
1796 	for (index = end; index >= start; index--)
1797 		pci_restore_config_dword(pdev, 4 * index,
1798 					 pdev->saved_config_space[index],
1799 					 retry, force);
1800 }
1801 
1802 static void pci_restore_config_space(struct pci_dev *pdev)
1803 {
1804 	if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
1805 		pci_restore_config_space_range(pdev, 10, 15, 0, false);
1806 		/* Restore BARs before the command register. */
1807 		pci_restore_config_space_range(pdev, 4, 9, 10, false);
1808 		pci_restore_config_space_range(pdev, 0, 3, 0, false);
1809 	} else if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
1810 		pci_restore_config_space_range(pdev, 12, 15, 0, false);
1811 
1812 		/*
1813 		 * Force rewriting of prefetch registers to avoid S3 resume
1814 		 * issues on Intel PCI bridges that occur when these
1815 		 * registers are not explicitly written.
1816 		 */
1817 		pci_restore_config_space_range(pdev, 9, 11, 0, true);
1818 		pci_restore_config_space_range(pdev, 0, 8, 0, false);
1819 	} else {
1820 		pci_restore_config_space_range(pdev, 0, 15, 0, false);
1821 	}
1822 }
1823 
1824 /**
1825  * pci_restore_state - Restore the saved state of a PCI device
1826  * @dev: PCI device that we're dealing with
1827  */
1828 void pci_restore_state(struct pci_dev *dev)
1829 {
1830 	pci_restore_pcie_state(dev);
1831 	pci_restore_pasid_state(dev);
1832 	pci_restore_pri_state(dev);
1833 	pci_restore_ats_state(dev);
1834 	pci_restore_vc_state(dev);
1835 	pci_restore_rebar_state(dev);
1836 	pci_restore_dpc_state(dev);
1837 	pci_restore_ptm_state(dev);
1838 	pci_restore_tph_state(dev);
1839 
1840 	pci_aer_clear_status(dev);
1841 	pci_restore_aer_state(dev);
1842 
1843 	pci_restore_config_space(dev);
1844 
1845 	pci_restore_pcix_state(dev);
1846 	pci_restore_msi_state(dev);
1847 
1848 	/* Restore ACS and IOV configuration state */
1849 	pci_enable_acs(dev);
1850 	pci_restore_iov_state(dev);
1851 
1852 	dev->state_saved = false;
1853 }
1854 EXPORT_SYMBOL(pci_restore_state);
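
/*
 * Illustrative sketch, not part of this file: a typical legacy PM driver
 * pairs pci_save_state() on the suspend path with pci_restore_state() on
 * resume.  The foo_*() names are hypothetical.
 *
 *	static int foo_suspend(struct pci_dev *pdev, pm_message_t state)
 *	{
 *		pci_save_state(pdev);
 *		pci_set_power_state(pdev, PCI_D3hot);
 *		return 0;
 *	}
 *
 *	static int foo_resume(struct pci_dev *pdev)
 *	{
 *		pci_set_power_state(pdev, PCI_D0);
 *		pci_restore_state(pdev);
 *		return 0;
 *	}
 */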
1855 
1856 struct pci_saved_state {
1857 	u32 config_space[16];
1858 	struct pci_cap_saved_data cap[];
1859 };
1860 
1861 /**
1862  * pci_store_saved_state - Allocate and return an opaque struct containing
1863  *			   the device saved state.
1864  * @dev: PCI device that we're dealing with
1865  *
1866  * Return NULL if no state or error.
1867  */
1868 struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev)
1869 {
1870 	struct pci_saved_state *state;
1871 	struct pci_cap_saved_state *tmp;
1872 	struct pci_cap_saved_data *cap;
1873 	size_t size;
1874 
1875 	if (!dev->state_saved)
1876 		return NULL;
1877 
1878 	size = sizeof(*state) + sizeof(struct pci_cap_saved_data);
1879 
1880 	hlist_for_each_entry(tmp, &dev->saved_cap_space, next)
1881 		size += sizeof(struct pci_cap_saved_data) + tmp->cap.size;
1882 
1883 	state = kzalloc(size, GFP_KERNEL);
1884 	if (!state)
1885 		return NULL;
1886 
1887 	memcpy(state->config_space, dev->saved_config_space,
1888 	       sizeof(state->config_space));
1889 
1890 	cap = state->cap;
1891 	hlist_for_each_entry(tmp, &dev->saved_cap_space, next) {
1892 		size_t len = sizeof(struct pci_cap_saved_data) + tmp->cap.size;
1893 		memcpy(cap, &tmp->cap, len);
1894 		cap = (struct pci_cap_saved_data *)((u8 *)cap + len);
1895 	}
1896 	/* Empty cap_save terminates list */
1897 
1898 	return state;
1899 }
1900 EXPORT_SYMBOL_GPL(pci_store_saved_state);
1901 
1902 /**
1903  * pci_load_saved_state - Reload the provided save state into struct pci_dev.
1904  * @dev: PCI device that we're dealing with
1905  * @state: Saved state returned from pci_store_saved_state()
1906  */
1907 int pci_load_saved_state(struct pci_dev *dev,
1908 			 struct pci_saved_state *state)
1909 {
1910 	struct pci_cap_saved_data *cap;
1911 
1912 	dev->state_saved = false;
1913 
1914 	if (!state)
1915 		return 0;
1916 
1917 	memcpy(dev->saved_config_space, state->config_space,
1918 	       sizeof(state->config_space));
1919 
1920 	cap = state->cap;
1921 	while (cap->size) {
1922 		struct pci_cap_saved_state *tmp;
1923 
1924 		tmp = _pci_find_saved_cap(dev, cap->cap_nr, cap->cap_extended);
1925 		if (!tmp || tmp->cap.size != cap->size)
1926 			return -EINVAL;
1927 
1928 		memcpy(tmp->cap.data, cap->data, tmp->cap.size);
1929 		cap = (struct pci_cap_saved_data *)((u8 *)cap +
1930 		       sizeof(struct pci_cap_saved_data) + cap->size);
1931 	}
1932 
1933 	dev->state_saved = true;
1934 	return 0;
1935 }
1936 EXPORT_SYMBOL_GPL(pci_load_saved_state);
1937 
1938 /**
1939  * pci_load_and_free_saved_state - Reload the save state pointed to by state,
1940  *				   and free the memory allocated for it.
1941  * @dev: PCI device that we're dealing with
1942  * @state: Pointer to saved state returned from pci_store_saved_state()
1943  */
1944 int pci_load_and_free_saved_state(struct pci_dev *dev,
1945 				  struct pci_saved_state **state)
1946 {
1947 	int ret = pci_load_saved_state(dev, *state);
1948 	kfree(*state);
1949 	*state = NULL;
1950 	return ret;
1951 }
1952 EXPORT_SYMBOL_GPL(pci_load_and_free_saved_state);
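
/*
 * Illustrative sketch, not part of this file: device passthrough code can
 * snapshot the saved state before handing the device out and apply it
 * again afterwards.  The "snapshot" variable is hypothetical.
 *
 *	struct pci_saved_state *snapshot;
 *
 *	pci_save_state(pdev);
 *	snapshot = pci_store_saved_state(pdev);
 *	...
 *	pci_load_and_free_saved_state(pdev, &snapshot);
 *	pci_restore_state(pdev);
 */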
1953 
1954 int __weak pcibios_enable_device(struct pci_dev *dev, int bars)
1955 {
1956 	return pci_enable_resources(dev, bars);
1957 }
1958 
1959 static int pci_host_bridge_enable_device(struct pci_dev *dev)
1960 {
1961 	struct pci_host_bridge *host_bridge = pci_find_host_bridge(dev->bus);
1962 	int err;
1963 
1964 	if (host_bridge && host_bridge->enable_device) {
1965 		err = host_bridge->enable_device(host_bridge, dev);
1966 		if (err)
1967 			return err;
1968 	}
1969 
1970 	return 0;
1971 }
1972 
1973 static void pci_host_bridge_disable_device(struct pci_dev *dev)
1974 {
1975 	struct pci_host_bridge *host_bridge = pci_find_host_bridge(dev->bus);
1976 
1977 	if (host_bridge && host_bridge->disable_device)
1978 		host_bridge->disable_device(host_bridge, dev);
1979 }
1980 
1981 static int do_pci_enable_device(struct pci_dev *dev, int bars)
1982 {
1983 	int err;
1984 	struct pci_dev *bridge;
1985 	u16 cmd;
1986 	u8 pin;
1987 
1988 	err = pci_set_power_state(dev, PCI_D0);
1989 	if (err < 0 && err != -EIO)
1990 		return err;
1991 
1992 	bridge = pci_upstream_bridge(dev);
1993 	if (bridge)
1994 		pcie_aspm_powersave_config_link(bridge);
1995 
1996 	err = pci_host_bridge_enable_device(dev);
1997 	if (err)
1998 		return err;
1999 
2000 	err = pcibios_enable_device(dev, bars);
2001 	if (err < 0)
2002 		goto err_enable;
2003 	pci_fixup_device(pci_fixup_enable, dev);
2004 
2005 	if (dev->msi_enabled || dev->msix_enabled)
2006 		return 0;
2007 
2008 	pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
2009 	if (pin) {
2010 		pci_read_config_word(dev, PCI_COMMAND, &cmd);
2011 		if (cmd & PCI_COMMAND_INTX_DISABLE)
2012 			pci_write_config_word(dev, PCI_COMMAND,
2013 					      cmd & ~PCI_COMMAND_INTX_DISABLE);
2014 	}
2015 
2016 	return 0;
2017 
2018 err_enable:
2019 	pci_host_bridge_disable_device(dev);
2020 
2021 	return err;
2022 
2023 }
2024 
2025 /**
2026  * pci_reenable_device - Resume abandoned device
2027  * @dev: PCI device to be resumed
2028  *
2029  * NOTE: This function is a backend of pci_default_resume() and is not supposed
2030  * to be called by normal code; write a proper resume handler and use it instead.
2031  */
2032 int pci_reenable_device(struct pci_dev *dev)
2033 {
2034 	if (pci_is_enabled(dev))
2035 		return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1);
2036 	return 0;
2037 }
2038 EXPORT_SYMBOL(pci_reenable_device);
2039 
2040 static void pci_enable_bridge(struct pci_dev *dev)
2041 {
2042 	struct pci_dev *bridge;
2043 	int retval;
2044 
2045 	bridge = pci_upstream_bridge(dev);
2046 	if (bridge)
2047 		pci_enable_bridge(bridge);
2048 
2049 	if (pci_is_enabled(dev)) {
2050 		if (!dev->is_busmaster)
2051 			pci_set_master(dev);
2052 		return;
2053 	}
2054 
2055 	retval = pci_enable_device(dev);
2056 	if (retval)
2057 		pci_err(dev, "Error enabling bridge (%d), continuing\n",
2058 			retval);
2059 	pci_set_master(dev);
2060 }
2061 
2062 static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags)
2063 {
2064 	struct pci_dev *bridge;
2065 	int err;
2066 	int i, bars = 0;
2067 
2068 	/*
2069 	 * Power state could be unknown at this point, either due to a fresh
2070 	 * boot or a device removal call.  So get the current power state
2071 	 * so that things like MSI message writing will behave as expected
2072 	 * (e.g. if the device really is in D0 at enable time).
2073 	 */
2074 	pci_update_current_state(dev, dev->current_state);
2075 
2076 	if (atomic_inc_return(&dev->enable_cnt) > 1)
2077 		return 0;		/* already enabled */
2078 
2079 	bridge = pci_upstream_bridge(dev);
2080 	if (bridge)
2081 		pci_enable_bridge(bridge);
2082 
2083 	/* Skip only the SR-IOV related resources */
2084 	for (i = 0; i <= PCI_ROM_RESOURCE; i++)
2085 		if (dev->resource[i].flags & flags)
2086 			bars |= (1 << i);
2087 	for (i = PCI_BRIDGE_RESOURCES; i < DEVICE_COUNT_RESOURCE; i++)
2088 		if (dev->resource[i].flags & flags)
2089 			bars |= (1 << i);
2090 
2091 	err = do_pci_enable_device(dev, bars);
2092 	if (err < 0)
2093 		atomic_dec(&dev->enable_cnt);
2094 	return err;
2095 }
2096 
2097 /**
2098  * pci_enable_device_mem - Initialize a device for use with Memory space
2099  * @dev: PCI device to be initialized
2100  *
2101  * Initialize device before it's used by a driver. Ask low-level code
2102  * to enable Memory resources. Wake up the device if it was suspended.
2103  * Beware, this function can fail.
2104  */
2105 int pci_enable_device_mem(struct pci_dev *dev)
2106 {
2107 	return pci_enable_device_flags(dev, IORESOURCE_MEM);
2108 }
2109 EXPORT_SYMBOL(pci_enable_device_mem);
2110 
2111 /**
2112  * pci_enable_device - Initialize device before it's used by a driver.
2113  * @dev: PCI device to be initialized
2114  *
2115  * Initialize device before it's used by a driver. Ask low-level code
2116  * to enable I/O and memory. Wake up the device if it was suspended.
2117  * Beware, this function can fail.
2118  *
2119  * Note we don't actually enable the device many times if we call
2120  * this function repeatedly (we just increment the count).
2121  */
2122 int pci_enable_device(struct pci_dev *dev)
2123 {
2124 	return pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO);
2125 }
2126 EXPORT_SYMBOL(pci_enable_device);
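
/*
 * Illustrative sketch, not part of this file: typical probe() usage,
 * balanced by pci_disable_device() on the error and remove paths.  The
 * foo names are hypothetical.
 *
 *	static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 *	{
 *		int rc;
 *
 *		rc = pci_enable_device(pdev);
 *		if (rc)
 *			return rc;
 *
 *		rc = pci_request_regions(pdev, "foo");
 *		if (rc) {
 *			pci_disable_device(pdev);
 *			return rc;
 *		}
 *
 *		pci_set_master(pdev);
 *		return 0;
 *	}
 */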
2127 
2128 /**
2129  * pcibios_device_add - provide arch specific hooks when adding device dev
2130  * @dev: the PCI device being added
2131  *
2132  * Permits the platform to provide architecture specific functionality when
2133  * devices are added. This is the default implementation. Architecture
2134  * implementations can override this.
2135  */
2136 int __weak pcibios_device_add(struct pci_dev *dev)
2137 {
2138 	return 0;
2139 }
2140 
2141 /**
2142  * pcibios_release_device - provide arch specific hooks when releasing
2143  *			    device dev
2144  * @dev: the PCI device being released
2145  *
2146  * Permits the platform to provide architecture specific functionality when
2147  * devices are released. This is the default implementation. Architecture
2148  * implementations can override this.
2149  */
2150 void __weak pcibios_release_device(struct pci_dev *dev) {}
2151 
2152 /**
2153  * pcibios_disable_device - disable arch specific PCI resources for device dev
2154  * @dev: the PCI device to disable
2155  *
2156  * Disables architecture specific PCI resources for the device. This
2157  * is the default implementation. Architecture implementations can
2158  * override this.
2159  */
2160 void __weak pcibios_disable_device(struct pci_dev *dev) {}
2161 
2162 static void do_pci_disable_device(struct pci_dev *dev)
2163 {
2164 	u16 pci_command;
2165 
2166 	pci_read_config_word(dev, PCI_COMMAND, &pci_command);
2167 	if (pci_command & PCI_COMMAND_MASTER) {
2168 		pci_command &= ~PCI_COMMAND_MASTER;
2169 		pci_write_config_word(dev, PCI_COMMAND, pci_command);
2170 	}
2171 
2172 	pcibios_disable_device(dev);
2173 }
2174 
2175 /**
2176  * pci_disable_enabled_device - Disable device without updating enable_cnt
2177  * @dev: PCI device to disable
2178  *
2179  * NOTE: This function is a backend of PCI power management routines and is
2180  * not supposed to be called by drivers.
2181  */
2182 void pci_disable_enabled_device(struct pci_dev *dev)
2183 {
2184 	if (pci_is_enabled(dev))
2185 		do_pci_disable_device(dev);
2186 }
2187 
2188 /**
2189  * pci_disable_device - Disable PCI device after use
2190  * @dev: PCI device to be disabled
2191  *
2192  * Signal to the system that the PCI device is not in use by the system
2193  * anymore.  This only involves disabling PCI bus-mastering, if active.
2194  *
2195  * Note we don't actually disable the device until all callers of
2196  * pci_enable_device() have called pci_disable_device().
2197  */
2198 void pci_disable_device(struct pci_dev *dev)
2199 {
2200 	dev_WARN_ONCE(&dev->dev, atomic_read(&dev->enable_cnt) <= 0,
2201 		      "disabling already-disabled device");
2202 
2203 	if (atomic_dec_return(&dev->enable_cnt) != 0)
2204 		return;
2205 
2206 	pci_host_bridge_disable_device(dev);
2207 
2208 	do_pci_disable_device(dev);
2209 
2210 	dev->is_busmaster = 0;
2211 }
2212 EXPORT_SYMBOL(pci_disable_device);
2213 
2214 /**
2215  * pcibios_set_pcie_reset_state - set reset state for device dev
2216  * @dev: the PCIe device reset
2217  * @state: Reset state to enter into
2218  *
2219  * Set the PCIe reset state for the device. This is the default
2220  * implementation. Architecture implementations can override this.
2221  */
2222 int __weak pcibios_set_pcie_reset_state(struct pci_dev *dev,
2223 					enum pcie_reset_state state)
2224 {
2225 	return -EINVAL;
2226 }
2227 
2228 /**
2229  * pci_set_pcie_reset_state - set reset state for device dev
2230  * @dev: the PCIe device reset
2231  * @state: Reset state to enter into
2232  *
2233  * Sets the PCI reset state for the device.
2234  */
2235 int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
2236 {
2237 	return pcibios_set_pcie_reset_state(dev, state);
2238 }
2239 EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);
2240 
2241 #ifdef CONFIG_PCIEAER
2242 void pcie_clear_device_status(struct pci_dev *dev)
2243 {
2244 	u16 sta;
2245 
2246 	pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &sta);
2247 	pcie_capability_write_word(dev, PCI_EXP_DEVSTA, sta);
2248 }
2249 #endif
2250 
2251 /**
2252  * pcie_clear_root_pme_status - Clear root port PME interrupt status.
2253  * @dev: PCIe root port or event collector.
2254  */
2255 void pcie_clear_root_pme_status(struct pci_dev *dev)
2256 {
2257 	pcie_capability_write_dword(dev, PCI_EXP_RTSTA, PCI_EXP_RTSTA_PME);
2258 }
2259 
2260 /**
2261  * pci_check_pme_status - Check if given device has generated PME.
2262  * @dev: Device to check.
2263  *
2264  * Check the PME status of the device and if set, clear it and clear PME enable
2265  * (if set).  Return 'true' if PME status and PME enable were both set or
2266  * 'false' otherwise.
2267  */
2268 bool pci_check_pme_status(struct pci_dev *dev)
2269 {
2270 	int pmcsr_pos;
2271 	u16 pmcsr;
2272 	bool ret = false;
2273 
2274 	if (!dev->pm_cap)
2275 		return false;
2276 
2277 	pmcsr_pos = dev->pm_cap + PCI_PM_CTRL;
2278 	pci_read_config_word(dev, pmcsr_pos, &pmcsr);
2279 	if (!(pmcsr & PCI_PM_CTRL_PME_STATUS))
2280 		return false;
2281 
2282 	/* Clear PME status. */
2283 	pmcsr |= PCI_PM_CTRL_PME_STATUS;
2284 	if (pmcsr & PCI_PM_CTRL_PME_ENABLE) {
2285 		/* Disable PME to avoid interrupt flood. */
2286 		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
2287 		ret = true;
2288 	}
2289 
2290 	pci_write_config_word(dev, pmcsr_pos, pmcsr);
2291 
2292 	return ret;
2293 }
2294 
2295 /**
2296  * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set.
2297  * @dev: Device to handle.
2298  * @pme_poll_reset: Whether or not to reset the device's pme_poll flag.
2299  *
2300  * Check if @dev has generated PME and queue a resume request for it in that
2301  * case.
2302  */
2303 static int pci_pme_wakeup(struct pci_dev *dev, void *pme_poll_reset)
2304 {
2305 	if (pme_poll_reset && dev->pme_poll)
2306 		dev->pme_poll = false;
2307 
2308 	if (pci_check_pme_status(dev)) {
2309 		pci_wakeup_event(dev);
2310 		pm_request_resume(&dev->dev);
2311 	}
2312 	return 0;
2313 }
2314 
2315 /**
2316  * pci_pme_wakeup_bus - Walk given bus and wake up devices on it, if necessary.
2317  * @bus: Top bus of the subtree to walk.
2318  */
2319 void pci_pme_wakeup_bus(struct pci_bus *bus)
2320 {
2321 	if (bus)
2322 		pci_walk_bus(bus, pci_pme_wakeup, (void *)true);
2323 }
2324 
2325 
2326 /**
2327  * pci_pme_capable - check the capability of PCI device to generate PME#
2328  * @dev: PCI device to handle.
2329  * @state: PCI state from which device will issue PME#.
2330  */
2331 bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
2332 {
2333 	if (!dev->pm_cap)
2334 		return false;
2335 
2336 	return !!(dev->pme_support & (1 << state));
2337 }
2338 EXPORT_SYMBOL(pci_pme_capable);
2339 
2340 static void pci_pme_list_scan(struct work_struct *work)
2341 {
2342 	struct pci_pme_device *pme_dev, *n;
2343 
2344 	mutex_lock(&pci_pme_list_mutex);
2345 	list_for_each_entry_safe(pme_dev, n, &pci_pme_list, list) {
2346 		struct pci_dev *pdev = pme_dev->dev;
2347 
2348 		if (pdev->pme_poll) {
2349 			struct pci_dev *bridge = pdev->bus->self;
2350 			struct device *dev = &pdev->dev;
2351 			struct device *bdev = bridge ? &bridge->dev : NULL;
2352 			int bref = 0;
2353 
2354 			/*
2355 			 * If we have a bridge, it should be in an active/D0
2356 			 * state or the configuration space of subordinate
2357 			 * devices may not be accessible or stable over the
2358 			 * course of the call.
2359 			 */
2360 			if (bdev) {
2361 				bref = pm_runtime_get_if_active(bdev);
2362 				if (!bref)
2363 					continue;
2364 
2365 				if (bridge->current_state != PCI_D0)
2366 					goto put_bridge;
2367 			}
2368 
2369 			/*
2370 			 * The device itself should be suspended but config
2371 			 * space must be accessible, therefore it cannot be in
2372 			 * D3cold.
2373 			 */
2374 			if (pm_runtime_suspended(dev) &&
2375 			    pdev->current_state != PCI_D3cold)
2376 				pci_pme_wakeup(pdev, NULL);
2377 
2378 put_bridge:
2379 			if (bref > 0)
2380 				pm_runtime_put(bdev);
2381 		} else {
2382 			list_del(&pme_dev->list);
2383 			kfree(pme_dev);
2384 		}
2385 	}
2386 	if (!list_empty(&pci_pme_list))
2387 		queue_delayed_work(system_freezable_wq, &pci_pme_work,
2388 				   msecs_to_jiffies(PME_TIMEOUT));
2389 	mutex_unlock(&pci_pme_list_mutex);
2390 }
2391 
2392 static void __pci_pme_active(struct pci_dev *dev, bool enable)
2393 {
2394 	u16 pmcsr;
2395 
2396 	if (!dev->pme_support)
2397 		return;
2398 
2399 	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
2400 	/* Clear PME_Status by writing 1 to it and enable PME# */
2401 	pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;
2402 	if (!enable)
2403 		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
2404 
2405 	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
2406 }
2407 
2408 /**
2409  * pci_pme_restore - Restore PME configuration after config space restore.
2410  * @dev: PCI device to update.
2411  */
2412 void pci_pme_restore(struct pci_dev *dev)
2413 {
2414 	u16 pmcsr;
2415 
2416 	if (!dev->pme_support)
2417 		return;
2418 
2419 	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
2420 	if (dev->wakeup_prepared) {
2421 		pmcsr |= PCI_PM_CTRL_PME_ENABLE;
2422 		pmcsr &= ~PCI_PM_CTRL_PME_STATUS;
2423 	} else {
2424 		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
2425 		pmcsr |= PCI_PM_CTRL_PME_STATUS;
2426 	}
2427 	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
2428 }
2429 
2430 /**
2431  * pci_pme_active - enable or disable PCI device's PME# function
2432  * @dev: PCI device to handle.
2433  * @enable: 'true' to enable PME# generation; 'false' to disable it.
2434  *
2435  * The caller must verify that the device is capable of generating PME# before
2436  * calling this function with @enable equal to 'true'.
2437  */
2438 void pci_pme_active(struct pci_dev *dev, bool enable)
2439 {
2440 	__pci_pme_active(dev, enable);
2441 
2442 	/*
2443 	 * PCI (as opposed to PCIe) PME requires that the device have
2444 	 * its PME# line hooked up correctly. Not all hardware vendors
2445 	 * do this, so the PME never gets delivered and the device
2446 	 * remains asleep. The easiest way around this is to
2447 	 * periodically walk the list of suspended devices and check
2448 	 * whether any have their PME flag set. The assumption is that
2449 	 * we'll wake up often enough anyway that this won't be a huge
2450 	 * hit, and the power savings from the devices will still be a
2451 	 * win.
2452 	 *
2453 	 * Although PCIe uses in-band PME message instead of PME# line
2454 	 * to report PME, PME does not work for some PCIe devices in
2455 	 * reality.  For example, there are devices that set their PME
2456 	 * status bits, but don't really bother to send a PME message;
2457 	 * there are PCI Express Root Ports that don't bother to
2458 	 * trigger interrupts when they receive PME messages from the
2459 	 * devices below.  So PME poll is used for PCIe devices too.
2460 	 */
2461 
2462 	if (dev->pme_poll) {
2463 		struct pci_pme_device *pme_dev;
2464 		if (enable) {
2465 			pme_dev = kmalloc(sizeof(struct pci_pme_device),
2465 					  GFP_KERNEL);
2466 			if (!pme_dev) {
2467 				pci_warn(dev, "can't enable PME#\n");
2468 				return;
2469 			}
2470 			pme_dev->dev = dev;
2471 			mutex_lock(&pci_pme_list_mutex);
2472 			list_add(&pme_dev->list, &pci_pme_list);
2473 			if (list_is_singular(&pci_pme_list))
2474 				queue_delayed_work(system_freezable_wq,
2475 						   &pci_pme_work,
2476 						   msecs_to_jiffies(PME_TIMEOUT));
2477 			mutex_unlock(&pci_pme_list_mutex);
2478 		} else {
2479 			mutex_lock(&pci_pme_list_mutex);
2480 			list_for_each_entry(pme_dev, &pci_pme_list, list) {
2481 				if (pme_dev->dev == dev) {
2482 					list_del(&pme_dev->list);
2483 					kfree(pme_dev);
2484 					break;
2485 				}
2486 			}
2487 			mutex_unlock(&pci_pme_list_mutex);
2488 		}
2489 	}
2490 
2491 	pci_dbg(dev, "PME# %s\n", enable ? "enabled" : "disabled");
2492 }
2493 EXPORT_SYMBOL(pci_pme_active);
2494 
2495 /**
2496  * __pci_enable_wake - enable PCI device as wakeup event source
2497  * @dev: PCI device affected
2498  * @state: PCI state from which device will issue wakeup events
2499  * @enable: True to enable event generation; false to disable
2500  *
2501  * This enables the device as a wakeup event source, or disables it.
2502  * When such events involve platform-specific hooks, those hooks are
2503  * called automatically by this routine.
2504  *
2505  * Devices with legacy power management (no standard PCI PM capabilities)
2506  * always require such platform hooks.
2507  *
2508  * RETURN VALUE:
2509  * 0 is returned on success
2510  * -EINVAL is returned if device is not supposed to wake up the system
2511  * Error code depending on the platform is returned if both the platform and
2512  * the native mechanism fail to enable the generation of wake-up events
2513  */
2514 static int __pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable)
2515 {
2516 	int ret = 0;
2517 
2518 	/*
2519 	 * Bridges that are not power-manageable directly only signal
2520 	 * wakeup on behalf of subordinate devices which is set up
2521 	 * elsewhere, so skip them. However, bridges that are
2522 	 * power-manageable may signal wakeup for themselves (for example,
2523 	 * on a hotplug event) and they need to be covered here.
2524 	 */
2525 	if (!pci_power_manageable(dev))
2526 		return 0;
2527 
2528 	/* Don't do the same thing twice in a row for one device. */
2529 	if (!!enable == !!dev->wakeup_prepared)
2530 		return 0;
2531 
2532 	/*
2533 	 * According to "PCI System Architecture" 4th ed. by Tom Shanley & Don
2534 	 * Anderson we should be doing PME# wake enable followed by ACPI wake
2535 	 * enable.  To disable wake-up we call the platform first, for symmetry.
2536 	 */
2537 
2538 	if (enable) {
2539 		int error;
2540 
2541 		/*
2542 		 * Enable PME signaling if the device can signal PME from
2543 		 * D3cold regardless of whether or not it can signal PME from
2544 		 * the current target state, because that will allow it to
2545 		 * signal PME when the hierarchy above it goes into D3cold and
2546 		 * the device itself ends up in D3cold as a result of that.
2547 		 */
2548 		if (pci_pme_capable(dev, state) || pci_pme_capable(dev, PCI_D3cold))
2549 			pci_pme_active(dev, true);
2550 		else
2551 			ret = 1;
2552 		error = platform_pci_set_wakeup(dev, true);
2553 		if (ret)
2554 			ret = error;
2555 		if (!ret)
2556 			dev->wakeup_prepared = true;
2557 	} else {
2558 		platform_pci_set_wakeup(dev, false);
2559 		pci_pme_active(dev, false);
2560 		dev->wakeup_prepared = false;
2561 	}
2562 
2563 	return ret;
2564 }
2565 
2566 /**
2567  * pci_enable_wake - change wakeup settings for a PCI device
2568  * @pci_dev: Target device
2569  * @state: PCI state from which device will issue wakeup events
2570  * @enable: Whether or not to enable event generation
2571  *
2572  * If @enable is set, check device_may_wakeup() for the device before calling
2573  * __pci_enable_wake() for it.
2574  */
2575 int pci_enable_wake(struct pci_dev *pci_dev, pci_power_t state, bool enable)
2576 {
2577 	if (enable && !device_may_wakeup(&pci_dev->dev))
2578 		return -EINVAL;
2579 
2580 	return __pci_enable_wake(pci_dev, state, enable);
2581 }
2582 EXPORT_SYMBOL(pci_enable_wake);
2583 
2584 /**
2585  * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold
2586  * @dev: PCI device to prepare
2587  * @enable: True to enable wake-up event generation; false to disable
2588  *
2589  * Many drivers want the device to wake up the system from D3_hot or D3_cold
2590  * and this function allows them to set that up cleanly - pci_enable_wake()
2591  * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI
2592  * ordering constraints.
2593  *
2594  * This function only returns error code if the device is not allowed to wake
2595  * up the system from sleep or it is not capable of generating PME# from both
2596  * D3_hot and D3_cold and the platform is unable to enable wake-up power for it.
2597  */
2598 int pci_wake_from_d3(struct pci_dev *dev, bool enable)
2599 {
2600 	return pci_pme_capable(dev, PCI_D3cold) ?
2601 			pci_enable_wake(dev, PCI_D3cold, enable) :
2602 			pci_enable_wake(dev, PCI_D3hot, enable);
2603 }
2604 EXPORT_SYMBOL(pci_wake_from_d3);
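
/*
 * Illustrative sketch, not part of this file: a network driver arming
 * Wake-on-LAN on its suspend path.  The foo and wol_enabled names are
 * hypothetical.
 *
 *	static int __maybe_unused foo_suspend(struct device *dev)
 *	{
 *		struct pci_dev *pdev = to_pci_dev(dev);
 *		struct foo_priv *priv = pci_get_drvdata(pdev);
 *
 *		pci_wake_from_d3(pdev, priv->wol_enabled);
 *		return 0;
 *	}
 */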
2605 
2606 /**
2607  * pci_target_state - find an appropriate low power state for a given PCI dev
2608  * @dev: PCI device
2609  * @wakeup: Whether or not wakeup functionality will be enabled for the device.
2610  *
2611  * Use underlying platform code to find a supported low power state for @dev.
2612  * If the platform can't manage @dev, return the deepest state from which it
2613  * can generate wake events, based on any available PME info.
2614  */
2615 static pci_power_t pci_target_state(struct pci_dev *dev, bool wakeup)
2616 {
2617 	if (platform_pci_power_manageable(dev)) {
2618 		/*
2619 		 * Call the platform to find the target state for the device.
2620 		 */
2621 		pci_power_t state = platform_pci_choose_state(dev);
2622 
2623 		switch (state) {
2624 		case PCI_POWER_ERROR:
2625 		case PCI_UNKNOWN:
2626 			return PCI_D3hot;
2627 
2628 		case PCI_D1:
2629 		case PCI_D2:
2630 			if (pci_no_d1d2(dev))
2631 				return PCI_D3hot;
2632 		}
2633 
2634 		return state;
2635 	}
2636 
2637 	/*
2638 	 * If the device is in D3cold even though it's not power-manageable by
2639 	 * the platform, it may have been powered down by non-standard means.
2640 	 * Best to let it slumber.
2641 	 */
2642 	if (dev->current_state == PCI_D3cold)
2643 		return PCI_D3cold;
2644 	else if (!dev->pm_cap)
2645 		return PCI_D0;
2646 
2647 	if (wakeup && dev->pme_support) {
2648 		pci_power_t state = PCI_D3hot;
2649 
2650 		/*
2651 		 * Find the deepest state from which the device can generate
2652 		 * PME#.
2653 		 */
2654 		while (state && !(dev->pme_support & (1 << state)))
2655 			state--;
2656 
2657 		if (state)
2658 			return state;
2659 		else if (dev->pme_support & 1)
2660 			return PCI_D0;
2661 	}
2662 
2663 	return PCI_D3hot;
2664 }
2665 
2666 /**
2667  * pci_prepare_to_sleep - prepare PCI device for system-wide transition
2668  *			  into a sleep state
2669  * @dev: Device to handle.
2670  *
2671  * Choose the power state appropriate for the device depending on whether
2672  * it can wake up the system and/or is power manageable by the platform
2673  * (PCI_D3hot is the default) and put the device into that state.
2674  */
2675 int pci_prepare_to_sleep(struct pci_dev *dev)
2676 {
2677 	bool wakeup = device_may_wakeup(&dev->dev);
2678 	pci_power_t target_state = pci_target_state(dev, wakeup);
2679 	int error;
2680 
2681 	if (target_state == PCI_POWER_ERROR)
2682 		return -EIO;
2683 
2684 	pci_enable_wake(dev, target_state, wakeup);
2685 
2686 	error = pci_set_power_state(dev, target_state);
2687 
2688 	if (error)
2689 		pci_enable_wake(dev, target_state, false);
2690 
2691 	return error;
2692 }
2693 EXPORT_SYMBOL(pci_prepare_to_sleep);
2694 
2695 /**
2696  * pci_back_from_sleep - turn PCI device on during system-wide transition
2697  *			 into working state
2698  * @dev: Device to handle.
2699  *
2700  * Disable device's system wake-up capability and put it into D0.
2701  */
2702 int pci_back_from_sleep(struct pci_dev *dev)
2703 {
2704 	int ret = pci_set_power_state(dev, PCI_D0);
2705 
2706 	if (ret)
2707 		return ret;
2708 
2709 	pci_enable_wake(dev, PCI_D0, false);
2710 	return 0;
2711 }
2712 EXPORT_SYMBOL(pci_back_from_sleep);
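
/*
 * Illustrative sketch, not part of this file: drivers that want the PCI
 * core to choose the wakeup configuration and power state can use the
 * two helpers above.  The foo names are hypothetical.
 *
 *	static int foo_suspend(struct pci_dev *pdev, pm_message_t state)
 *	{
 *		pci_save_state(pdev);
 *		return pci_prepare_to_sleep(pdev);
 *	}
 *
 *	static int foo_resume(struct pci_dev *pdev)
 *	{
 *		int rc = pci_back_from_sleep(pdev);
 *
 *		if (rc)
 *			return rc;
 *		pci_restore_state(pdev);
 *		return 0;
 *	}
 */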
2713 
2714 /**
2715  * pci_finish_runtime_suspend - Carry out PCI-specific part of runtime suspend.
2716  * @dev: PCI device being suspended.
2717  *
2718  * Prepare @dev to generate wake-up events at run time and put it into a low
2719  * power state.
2720  */
2721 int pci_finish_runtime_suspend(struct pci_dev *dev)
2722 {
2723 	pci_power_t target_state;
2724 	int error;
2725 
2726 	target_state = pci_target_state(dev, device_can_wakeup(&dev->dev));
2727 	if (target_state == PCI_POWER_ERROR)
2728 		return -EIO;
2729 
2730 	__pci_enable_wake(dev, target_state, pci_dev_run_wake(dev));
2731 
2732 	error = pci_set_power_state(dev, target_state);
2733 
2734 	if (error)
2735 		pci_enable_wake(dev, target_state, false);
2736 
2737 	return error;
2738 }
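
/*
 * Illustrative sketch, not part of this file: a runtime-suspend path would
 * typically save config space first and then let the helper above arm
 * wakeup and pick the low-power state; error handling elided.
 *
 *	pci_save_state(pdev);
 *	error = pci_finish_runtime_suspend(pdev);
 */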
2739 
2740 /**
2741  * pci_dev_run_wake - Check if device can generate run-time wake-up events.
2742  * @dev: Device to check.
2743  *
2744  * Return true if the device itself is capable of generating wake-up events
2745  * (through the platform or using the native PCIe PME) or if the device supports
2746  * PME and one of its upstream bridges can generate wake-up events.
2747  */
2748 bool pci_dev_run_wake(struct pci_dev *dev)
2749 {
2750 	struct pci_bus *bus = dev->bus;
2751 
2752 	if (!dev->pme_support)
2753 		return false;
2754 
2755 	/* PME-capable in principle, but not from the target power state */
2756 	if (!pci_pme_capable(dev, pci_target_state(dev, true)))
2757 		return false;
2758 
2759 	if (device_can_wakeup(&dev->dev))
2760 		return true;
2761 
2762 	while (bus->parent) {
2763 		struct pci_dev *bridge = bus->self;
2764 
2765 		if (device_can_wakeup(&bridge->dev))
2766 			return true;
2767 
2768 		bus = bus->parent;
2769 	}
2770 
2771 	/* We have reached the root bus. */
2772 	if (bus->bridge)
2773 		return device_can_wakeup(bus->bridge);
2774 
2775 	return false;
2776 }
2777 EXPORT_SYMBOL_GPL(pci_dev_run_wake);
2778 
2779 /**
2780  * pci_dev_need_resume - Check if it is necessary to resume the device.
2781  * @pci_dev: Device to check.
2782  *
2783  * Return 'true' if the device is not runtime-suspended or it has to be
2784  * reconfigured due to wakeup settings difference between system and runtime
2785  * suspend, or the current power state of it is not suitable for the upcoming
2786  * (system-wide) transition.
2787  */
2788 bool pci_dev_need_resume(struct pci_dev *pci_dev)
2789 {
2790 	struct device *dev = &pci_dev->dev;
2791 	pci_power_t target_state;
2792 
2793 	if (!pm_runtime_suspended(dev) || platform_pci_need_resume(pci_dev))
2794 		return true;
2795 
2796 	target_state = pci_target_state(pci_dev, device_may_wakeup(dev));
2797 
2798 	/*
2799 	 * If the earlier platform check has not triggered, D3cold is just power
2800 	 * removal on top of D3hot, so no need to resume the device in that
2801 	 * case.
2802 	 */
2803 	return target_state != pci_dev->current_state &&
2804 		target_state != PCI_D3cold &&
2805 		pci_dev->current_state != PCI_D3hot;
2806 }
2807 
2808 /**
2809  * pci_dev_adjust_pme - Adjust PME setting for a suspended device.
2810  * @pci_dev: Device to check.
2811  *
2812  * If the device is suspended and it is not configured for system wakeup,
2813  * disable PME for it to prevent it from waking up the system unnecessarily.
2814  *
2815  * Note that if the device's power state is D3cold and the platform check in
2816  * pci_dev_need_resume() has not triggered, the device's configuration need not
2817  * be changed.
2818  */
2819 void pci_dev_adjust_pme(struct pci_dev *pci_dev)
2820 {
2821 	struct device *dev = &pci_dev->dev;
2822 
2823 	spin_lock_irq(&dev->power.lock);
2824 
2825 	if (pm_runtime_suspended(dev) && !device_may_wakeup(dev) &&
2826 	    pci_dev->current_state < PCI_D3cold)
2827 		__pci_pme_active(pci_dev, false);
2828 
2829 	spin_unlock_irq(&dev->power.lock);
2830 }
2831 
2832 /**
2833  * pci_dev_complete_resume - Finalize resume from system sleep for a device.
2834  * @pci_dev: Device to handle.
2835  *
2836  * If the device is runtime suspended and wakeup-capable, enable PME for it as
2837  * it might have been disabled during the prepare phase of system suspend if
2838  * the device was not configured for system wakeup.
2839  */
2840 void pci_dev_complete_resume(struct pci_dev *pci_dev)
2841 {
2842 	struct device *dev = &pci_dev->dev;
2843 
2844 	if (!pci_dev_run_wake(pci_dev))
2845 		return;
2846 
2847 	spin_lock_irq(&dev->power.lock);
2848 
2849 	if (pm_runtime_suspended(dev) && pci_dev->current_state < PCI_D3cold)
2850 		__pci_pme_active(pci_dev, true);
2851 
2852 	spin_unlock_irq(&dev->power.lock);
2853 }
2854 
2855 /**
2856  * pci_choose_state - Choose the power state of a PCI device.
2857  * @dev: Target PCI device.
2858  * @state: Target state for the whole system.
2859  *
2860  * Returns PCI power state suitable for @dev and @state.
2861  */
2862 pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
2863 {
2864 	if (state.event == PM_EVENT_ON)
2865 		return PCI_D0;
2866 
2867 	return pci_target_state(dev, false);
2868 }
2869 EXPORT_SYMBOL(pci_choose_state);
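
/*
 * Illustrative sketch, not part of this file: a legacy .suspend()
 * callback maps the system transition to a device power state.
 *
 *	pci_set_power_state(pdev, pci_choose_state(pdev, state));
 */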
2870 
2871 void pci_config_pm_runtime_get(struct pci_dev *pdev)
2872 {
2873 	struct device *dev = &pdev->dev;
2874 	struct device *parent = dev->parent;
2875 
2876 	if (parent)
2877 		pm_runtime_get_sync(parent);
2878 	pm_runtime_get_noresume(dev);
2879 	/*
2880 	 * pdev->current_state is set to PCI_D3cold during suspending,
2881 	 * so wait until suspending completes
2882 	 */
2883 	pm_runtime_barrier(dev);
2884 	/*
2885 	 * Only need to resume devices in D3cold, because config
2886 	 * registers are still accessible for devices suspended but
2887 	 * not in D3cold.
2888 	 */
2889 	if (pdev->current_state == PCI_D3cold)
2890 		pm_runtime_resume(dev);
2891 }
2892 
2893 void pci_config_pm_runtime_put(struct pci_dev *pdev)
2894 {
2895 	struct device *dev = &pdev->dev;
2896 	struct device *parent = dev->parent;
2897 
2898 	pm_runtime_put(dev);
2899 	if (parent)
2900 		pm_runtime_put_sync(parent);
2901 }
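
/*
 * Illustrative sketch, not part of this file: config space accessors (the
 * sysfs "config" attribute, for instance) bracket accesses with the two
 * helpers above so the device is not in D3cold while its registers are
 * read or written.
 *
 *	pci_config_pm_runtime_get(pdev);
 *	pci_read_config_dword(pdev, pos, &val);
 *	pci_config_pm_runtime_put(pdev);
 */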
2902 
2903 static const struct dmi_system_id bridge_d3_blacklist[] = {
2904 #ifdef CONFIG_X86
2905 	{
2906 		/*
2907 		 * Gigabyte X299 root port is not marked as hotplug capable
2908 		 * which allows Linux to power manage it.  However, this
2909 		 * confuses the BIOS SMI handler so don't power manage root
2910 		 * ports on that system.
2911 		 */
2912 		.ident = "X299 DESIGNARE EX-CF",
2913 		.matches = {
2914 			DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
2915 			DMI_MATCH(DMI_BOARD_NAME, "X299 DESIGNARE EX-CF"),
2916 		},
2917 	},
2918 	{
2919 		/*
2920 		 * Downstream device is not accessible after putting a root port
2921 		 * into D3cold and back into D0 on Elo Continental Z2 board
2922 		 */
2923 		.ident = "Elo Continental Z2",
2924 		.matches = {
2925 			DMI_MATCH(DMI_BOARD_VENDOR, "Elo Touch Solutions"),
2926 			DMI_MATCH(DMI_BOARD_NAME, "Geminilake"),
2927 			DMI_MATCH(DMI_BOARD_VERSION, "Continental Z2"),
2928 		},
2929 	},
2930 	{
2931 		/*
2932 		 * Changing the power state of a root port that has a dGPU connected fails:
2933 		 * https://gitlab.freedesktop.org/drm/amd/-/issues/3229
2934 		 */
2935 		.ident = "Hewlett-Packard HP Pavilion 17 Notebook PC/1972",
2936 		.matches = {
2937 			DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
2938 			DMI_MATCH(DMI_BOARD_NAME, "1972"),
2939 			DMI_MATCH(DMI_BOARD_VERSION, "95.33"),
2940 		},
2941 	},
2942 #endif
2943 	{ }
2944 };
2945 
2946 /**
2947  * pci_bridge_d3_possible - Is it possible to put the bridge into D3
2948  * @bridge: Bridge to check
2949  *
2950  * Currently we only allow D3 for some PCIe ports and for Thunderbolt.
2951  *
2952  * Return: Whether it is possible to move the bridge to D3.
2953  *
2954  * The return value is guaranteed to be constant across the entire lifetime
2955  * of the bridge, including its hot-removal.
2956  */
2957 bool pci_bridge_d3_possible(struct pci_dev *bridge)
2958 {
2959 	if (!pci_is_pcie(bridge))
2960 		return false;
2961 
2962 	switch (pci_pcie_type(bridge)) {
2963 	case PCI_EXP_TYPE_ROOT_PORT:
2964 	case PCI_EXP_TYPE_UPSTREAM:
2965 	case PCI_EXP_TYPE_DOWNSTREAM:
2966 		if (pci_bridge_d3_disable)
2967 			return false;
2968 
2969 		/*
2970 		 * Hotplug ports handled by platform firmware may not be put
2971 		 * into D3 by the OS, e.g. ACPI slots ...
2972 		 */
2973 		if (bridge->is_hotplug_bridge && !bridge->is_pciehp)
2974 			return false;
2975 
2976 		/* ... or PCIe hotplug ports not handled natively by the OS. */
2977 		if (bridge->is_pciehp && !pciehp_is_native(bridge))
2978 			return false;
2979 
2980 		if (pci_bridge_d3_force)
2981 			return true;
2982 
2983 		/* Even the oldest 2010 Thunderbolt controller supports D3. */
2984 		if (bridge->is_thunderbolt)
2985 			return true;
2986 
2987 		/* Platform might know better if the bridge supports D3 */
2988 		if (platform_pci_bridge_d3(bridge))
2989 			return true;
2990 
2991 		/*
2992 		 * Hotplug ports handled natively by the OS were not validated
2993 		 * by vendors for runtime D3 at least until 2018 because there
2994 		 * was no OS support.
2995 		 */
2996 		if (bridge->is_pciehp)
2997 			return false;
2998 
2999 		if (dmi_check_system(bridge_d3_blacklist))
3000 			return false;
3001 
3002 		/*
3003 		 * Out of caution, we only allow PCIe ports from 2015 or newer
3004 		 * into D3 on x86.
3005 		 */
3006 		if (!IS_ENABLED(CONFIG_X86) || dmi_get_bios_year() >= 2015)
3007 			return true;
3008 		break;
3009 	}
3010 
3011 	return false;
3012 }
3013 
3014 static int pci_dev_check_d3cold(struct pci_dev *dev, void *data)
3015 {
3016 	bool *d3cold_ok = data;
3017 
3018 	if (/* The device needs to be allowed to go D3cold ... */
3019 	    dev->no_d3cold || !dev->d3cold_allowed ||
3020 
3021 	    /* ... and if it is wakeup capable to do so from D3cold. */
3022 	    (device_may_wakeup(&dev->dev) &&
3023 	     !pci_pme_capable(dev, PCI_D3cold)) ||
3024 
3025 	    /* If it is a bridge it must be allowed to go to D3. */
3026 	    !pci_power_manageable(dev))
3027 
3028 		*d3cold_ok = false;
3029 
3030 	return !*d3cold_ok;
3031 }
3032 
3033 /**
3034  * pci_bridge_d3_update - Update bridge D3 capabilities
3035  * @dev: PCI device which is changed
3036  *
3037  * Update upstream bridge PM capabilities depending on whether the
3038  * device PM configuration was changed or the device is being removed.  The
3039  * change is also propagated upstream.
3040  */
3041 void pci_bridge_d3_update(struct pci_dev *dev)
3042 {
3043 	bool remove = !device_is_registered(&dev->dev);
3044 	struct pci_dev *bridge;
3045 	bool d3cold_ok = true;
3046 
3047 	bridge = pci_upstream_bridge(dev);
3048 	if (!bridge || !pci_bridge_d3_possible(bridge))
3049 		return;
3050 
3051 	/*
3052 	 * If D3 is currently allowed for the bridge, removing one of its
3053 	 * children won't change that.
3054 	 */
3055 	if (remove && bridge->bridge_d3)
3056 		return;
3057 
3058 	/*
3059 	 * If D3 is currently allowed for the bridge and a child is added or
3060 	 * changed, disallowance of D3 can only be caused by that child, so
3061 	 * we only need to check that single device, not any of its siblings.
3062 	 *
3063 	 * If D3 is currently not allowed for the bridge, checking the device
3064 	 * first may allow us to skip checking its siblings.
3065 	 */
3066 	if (!remove)
3067 		pci_dev_check_d3cold(dev, &d3cold_ok);
3068 
3069 	/*
3070 	 * If D3 is currently not allowed for the bridge, this may be caused
3071 	 * either by the device being changed/removed or any of its siblings,
3072 	 * so we need to go through all children to find out if one of them
3073 	 * continues to block D3.
3074 	 */
3075 	if (d3cold_ok && !bridge->bridge_d3)
3076 		pci_walk_bus(bridge->subordinate, pci_dev_check_d3cold,
3077 			     &d3cold_ok);
3078 
3079 	if (bridge->bridge_d3 != d3cold_ok) {
3080 		bridge->bridge_d3 = d3cold_ok;
3081 		/* Propagate change to upstream bridges */
3082 		pci_bridge_d3_update(bridge);
3083 	}
3084 }
3085 
3086 /**
3087  * pci_d3cold_enable - Enable D3cold for device
3088  * @dev: PCI device to handle
3089  *
3090  * This function can be used in drivers to enable D3cold from the device
3091  * they handle.  It also updates upstream PCI bridge PM capabilities
3092  * accordingly.
3093  */
3094 void pci_d3cold_enable(struct pci_dev *dev)
3095 {
3096 	if (dev->no_d3cold) {
3097 		dev->no_d3cold = false;
3098 		pci_bridge_d3_update(dev);
3099 	}
3100 }
3101 EXPORT_SYMBOL_GPL(pci_d3cold_enable);
3102 
3103 /**
3104  * pci_d3cold_disable - Disable D3cold for device
3105  * @dev: PCI device to handle
3106  *
3107  * This function can be used in drivers to disable D3cold from the device
3108  * they handle.  It also updates upstream PCI bridge PM capabilities
3109  * accordingly.
3110  */
3111 void pci_d3cold_disable(struct pci_dev *dev)
3112 {
3113 	if (!dev->no_d3cold) {
3114 		dev->no_d3cold = true;
3115 		pci_bridge_d3_update(dev);
3116 	}
3117 }
3118 EXPORT_SYMBOL_GPL(pci_d3cold_disable);
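
/*
 * Illustrative sketch, not part of this file: a driver that must not lose
 * device context (firmware state, for example) can temporarily rule out
 * D3cold and allow it again when done.
 *
 *	pci_d3cold_disable(pdev);
 *	...
 *	pci_d3cold_enable(pdev);
 */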
3119 
3120 void pci_pm_power_up_and_verify_state(struct pci_dev *pci_dev)
3121 {
3122 	pci_power_up(pci_dev);
3123 	pci_update_current_state(pci_dev, PCI_D0);
3124 }
3125 
3126 /**
3127  * pci_pm_init - Initialize PM functions of given PCI device
3128  * @dev: PCI device to handle.
3129  */
3130 void pci_pm_init(struct pci_dev *dev)
3131 {
3132 	int pm;
3133 	u16 pmc;
3134 
3135 	device_enable_async_suspend(&dev->dev);
3136 	dev->wakeup_prepared = false;
3137 
3138 	dev->pm_cap = 0;
3139 	dev->pme_support = 0;
3140 
3141 	/* find PCI PM capability in list */
3142 	pm = pci_find_capability(dev, PCI_CAP_ID_PM);
3143 	if (!pm)
3144 		goto poweron;
3145 	/* Check device's ability to generate PME# */
3146 	pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);
3147 
3148 	if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
3149 		pci_err(dev, "unsupported PM cap regs version (%u)\n",
3150 			pmc & PCI_PM_CAP_VER_MASK);
3151 		goto poweron;
3152 	}
3153 
3154 	dev->pm_cap = pm;
3155 	dev->d3hot_delay = PCI_PM_D3HOT_WAIT;
3156 	dev->d3cold_delay = PCI_PM_D3COLD_WAIT;
3157 	dev->bridge_d3 = pci_bridge_d3_possible(dev);
3158 	dev->d3cold_allowed = true;
3159 
3160 	dev->d1_support = false;
3161 	dev->d2_support = false;
3162 	if (!pci_no_d1d2(dev)) {
3163 		if (pmc & PCI_PM_CAP_D1)
3164 			dev->d1_support = true;
3165 		if (pmc & PCI_PM_CAP_D2)
3166 			dev->d2_support = true;
3167 
3168 		if (dev->d1_support || dev->d2_support)
3169 			pci_info(dev, "supports%s%s\n",
3170 				   dev->d1_support ? " D1" : "",
3171 				   dev->d2_support ? " D2" : "");
3172 	}
3173 
3174 	pmc &= PCI_PM_CAP_PME_MASK;
3175 	if (pmc) {
3176 		pci_info(dev, "PME# supported from%s%s%s%s%s\n",
3177 			 (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
3178 			 (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
3179 			 (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
3180 			 (pmc & PCI_PM_CAP_PME_D3hot) ? " D3hot" : "",
3181 			 (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : "");
3182 		dev->pme_support = FIELD_GET(PCI_PM_CAP_PME_MASK, pmc);
3183 		dev->pme_poll = true;
3184 		/*
3185 		 * Make device's PM flags reflect the wake-up capability, but
3186 		 * let the user space enable it to wake up the system as needed.
3187 		 */
3188 		device_set_wakeup_capable(&dev->dev, true);
3189 		/* Disable the PME# generation functionality */
3190 		pci_pme_active(dev, false);
3191 	}
3192 
3193 poweron:
3194 	pci_pm_power_up_and_verify_state(dev);
3195 	pm_runtime_forbid(&dev->dev);
3196 
3197 	/*
3198 	 * Runtime PM will be enabled for the device when it has been fully
3199 	 * configured, but since its parent and suppliers may suspend in
3200 	 * the meantime, prevent them from doing so by changing the
3201 	 * device's runtime PM status to "active".
3202 	 */
3203 	pm_runtime_set_active(&dev->dev);
3204 }
3205 
3206 static unsigned long pci_ea_flags(struct pci_dev *dev, u8 prop)
3207 {
3208 	unsigned long flags = IORESOURCE_PCI_FIXED | IORESOURCE_PCI_EA_BEI;
3209 
3210 	switch (prop) {
3211 	case PCI_EA_P_MEM:
3212 	case PCI_EA_P_VF_MEM:
3213 		flags |= IORESOURCE_MEM;
3214 		break;
3215 	case PCI_EA_P_MEM_PREFETCH:
3216 	case PCI_EA_P_VF_MEM_PREFETCH:
3217 		flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH;
3218 		break;
3219 	case PCI_EA_P_IO:
3220 		flags |= IORESOURCE_IO;
3221 		break;
3222 	default:
3223 		return 0;
3224 	}
3225 
3226 	return flags;
3227 }
3228 
3229 static struct resource *pci_ea_get_resource(struct pci_dev *dev, u8 bei,
3230 					    u8 prop)
3231 {
3232 	if (bei <= PCI_EA_BEI_BAR5 && prop <= PCI_EA_P_IO)
3233 		return &dev->resource[bei];
3234 #ifdef CONFIG_PCI_IOV
3235 	else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5 &&
3236 		 (prop == PCI_EA_P_VF_MEM || prop == PCI_EA_P_VF_MEM_PREFETCH))
3237 		return &dev->resource[PCI_IOV_RESOURCES +
3238 				      bei - PCI_EA_BEI_VF_BAR0];
3239 #endif
3240 	else if (bei == PCI_EA_BEI_ROM)
3241 		return &dev->resource[PCI_ROM_RESOURCE];
3242 	else
3243 		return NULL;
3244 }
3245 
3246 /* Read an Enhanced Allocation (EA) entry */
3247 static int pci_ea_read(struct pci_dev *dev, int offset)
3248 {
3249 	struct resource *res;
3250 	const char *res_name;
3251 	int ent_size, ent_offset = offset;
3252 	resource_size_t start, end;
3253 	unsigned long flags;
3254 	u32 dw0, bei, base, max_offset;
3255 	u8 prop;
3256 	bool support_64 = (sizeof(resource_size_t) >= 8);
3257 
3258 	pci_read_config_dword(dev, ent_offset, &dw0);
3259 	ent_offset += 4;
3260 
3261 	/* Entry size field indicates DWORDs after 1st */
3262 	ent_size = (FIELD_GET(PCI_EA_ES, dw0) + 1) << 2;
3263 
3264 	if (!(dw0 & PCI_EA_ENABLE)) /* Entry not enabled */
3265 		goto out;
3266 
3267 	bei = FIELD_GET(PCI_EA_BEI, dw0);
3268 	prop = FIELD_GET(PCI_EA_PP, dw0);
3269 
3270 	/*
3271 	 * If the Property is in the reserved range, try the Secondary
3272 	 * Property instead.
3273 	 */
3274 	if (prop > PCI_EA_P_BRIDGE_IO && prop < PCI_EA_P_MEM_RESERVED)
3275 		prop = FIELD_GET(PCI_EA_SP, dw0);
3276 	if (prop > PCI_EA_P_BRIDGE_IO)
3277 		goto out;
3278 
3279 	res = pci_ea_get_resource(dev, bei, prop);
3280 	res_name = pci_resource_name(dev, bei);
3281 	if (!res) {
3282 		pci_err(dev, "Unsupported EA entry BEI: %u\n", bei);
3283 		goto out;
3284 	}
3285 
3286 	flags = pci_ea_flags(dev, prop);
3287 	if (!flags) {
3288 		pci_err(dev, "Unsupported EA properties: %#x\n", prop);
3289 		goto out;
3290 	}
3291 
3292 	/* Read Base */
3293 	pci_read_config_dword(dev, ent_offset, &base);
3294 	start = (base & PCI_EA_FIELD_MASK);
3295 	ent_offset += 4;
3296 
3297 	/* Read MaxOffset */
3298 	pci_read_config_dword(dev, ent_offset, &max_offset);
3299 	ent_offset += 4;
3300 
3301 	/* Read Base MSBs (if 64-bit entry) */
3302 	if (base & PCI_EA_IS_64) {
3303 		u32 base_upper;
3304 
3305 		pci_read_config_dword(dev, ent_offset, &base_upper);
3306 		ent_offset += 4;
3307 
3308 		flags |= IORESOURCE_MEM_64;
3309 
3310 		/* entry starts above 32-bit boundary, can't use */
3311 		if (!support_64 && base_upper)
3312 			goto out;
3313 
3314 		if (support_64)
3315 			start |= ((u64)base_upper << 32);
3316 	}
3317 
3318 	end = start + (max_offset | 0x03);
3319 
3320 	/* Read MaxOffset MSBs (if 64-bit entry) */
3321 	if (max_offset & PCI_EA_IS_64) {
3322 		u32 max_offset_upper;
3323 
3324 		pci_read_config_dword(dev, ent_offset, &max_offset_upper);
3325 		ent_offset += 4;
3326 
3327 		flags |= IORESOURCE_MEM_64;
3328 
3329 		/* entry too big, can't use */
3330 		if (!support_64 && max_offset_upper)
3331 			goto out;
3332 
3333 		if (support_64)
3334 			end += ((u64)max_offset_upper << 32);
3335 	}
3336 
3337 	if (end < start) {
3338 		pci_err(dev, "EA Entry crosses address boundary\n");
3339 		goto out;
3340 	}
3341 
3342 	if (ent_size != ent_offset - offset) {
3343 		pci_err(dev, "EA Entry Size (%d) does not match length read (%d)\n",
3344 			ent_size, ent_offset - offset);
3345 		goto out;
3346 	}
3347 
3348 	res->name = pci_name(dev);
3349 	res->start = start;
3350 	res->end = end;
3351 	res->flags = flags;
3352 
3353 	if (bei <= PCI_EA_BEI_BAR5)
3354 		pci_info(dev, "%s %pR: from Enhanced Allocation, properties %#02x\n",
3355 			 res_name, res, prop);
3356 	else if (bei == PCI_EA_BEI_ROM)
3357 		pci_info(dev, "%s %pR: from Enhanced Allocation, properties %#02x\n",
3358 			 res_name, res, prop);
3359 	else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5)
3360 		pci_info(dev, "%s %pR: from Enhanced Allocation, properties %#02x\n",
3361 			 res_name, res, prop);
3362 	else
3363 		pci_info(dev, "BEI %d %pR: from Enhanced Allocation, properties %#02x\n",
3364 			   bei, res, prop);
3365 
3366 out:
3367 	return offset + ent_size;
3368 }
3369 
3370 /* Enhanced Allocation Initialization */
3371 void pci_ea_init(struct pci_dev *dev)
3372 {
3373 	int ea;
3374 	u8 num_ent;
3375 	int offset;
3376 	int i;
3377 
3378 	/* find PCI EA capability in list */
3379 	ea = pci_find_capability(dev, PCI_CAP_ID_EA);
3380 	if (!ea)
3381 		return;
3382 
3383 	/* determine the number of entries */
3384 	pci_bus_read_config_byte(dev->bus, dev->devfn, ea + PCI_EA_NUM_ENT,
3385 					&num_ent);
3386 	num_ent &= PCI_EA_NUM_ENT_MASK;
3387 
3388 	offset = ea + PCI_EA_FIRST_ENT;
3389 
3390 	/* Skip DWORD 2 for type 1 functions */
3391 	if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
3392 		offset += 4;
3393 
3394 	/* parse each EA entry */
3395 	for (i = 0; i < num_ent; ++i)
3396 		offset = pci_ea_read(dev, offset);
3397 }
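
/*
 * For reference, the EA entry layout parsed by pci_ea_read() above, one
 * DWORD each (a sketch of the Enhanced Allocation capability, not a
 * substitute for the spec):
 *
 *	DW 0: Enable, Writable, Primary/Secondary Properties,
 *	      BAR Equivalent Indicator (BEI), Entry Size (DWORDs after DW 0)
 *	DW 1: Base[31:2], bit 1 set when a 64-bit Base follows
 *	DW 2: MaxOffset[31:2], bit 1 set when a 64-bit MaxOffset follows
 *	DW 3: Base[63:32] (only for a 64-bit Base)
 *	DW 4: MaxOffset[63:32] (only for a 64-bit MaxOffset)
 */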
3398 
3399 static void pci_add_saved_cap(struct pci_dev *pci_dev,
3400 	struct pci_cap_saved_state *new_cap)
3401 {
3402 	hlist_add_head(&new_cap->next, &pci_dev->saved_cap_space);
3403 }
3404 
3405 /**
3406  * _pci_add_cap_save_buffer - allocate buffer for saving given
3407  *			      capability registers
3408  * @dev: the PCI device
3409  * @cap: the capability to allocate the buffer for
3410  * @extended: Standard or Extended capability ID
3411  * @size: requested size of the buffer
3412  */
3413 static int _pci_add_cap_save_buffer(struct pci_dev *dev, u16 cap,
3414 				    bool extended, unsigned int size)
3415 {
3416 	int pos;
3417 	struct pci_cap_saved_state *save_state;
3418 
3419 	if (extended)
3420 		pos = pci_find_ext_capability(dev, cap);
3421 	else
3422 		pos = pci_find_capability(dev, cap);
3423 
3424 	if (!pos)
3425 		return 0;
3426 
3427 	save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL);
3428 	if (!save_state)
3429 		return -ENOMEM;
3430 
3431 	save_state->cap.cap_nr = cap;
3432 	save_state->cap.cap_extended = extended;
3433 	save_state->cap.size = size;
3434 	pci_add_saved_cap(dev, save_state);
3435 
3436 	return 0;
3437 }
3438 
3439 int pci_add_cap_save_buffer(struct pci_dev *dev, char cap, unsigned int size)
3440 {
3441 	return _pci_add_cap_save_buffer(dev, cap, false, size);
3442 }
3443 
3444 int pci_add_ext_cap_save_buffer(struct pci_dev *dev, u16 cap, unsigned int size)
3445 {
3446 	return _pci_add_cap_save_buffer(dev, cap, true, size);
3447 }
3448 
3449 /**
3450  * pci_allocate_cap_save_buffers - allocate buffers for saving capabilities
3451  * @dev: the PCI device
3452  */
3453 void pci_allocate_cap_save_buffers(struct pci_dev *dev)
3454 {
3455 	int error;
3456 
3457 	error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP,
3458 					PCI_EXP_SAVE_REGS * sizeof(u16));
3459 	if (error)
3460 		pci_err(dev, "unable to preallocate PCI Express save buffer\n");
3461 
3462 	error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16));
3463 	if (error)
3464 		pci_err(dev, "unable to preallocate PCI-X save buffer\n");
3465 
3466 	error = pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_LTR,
3467 					    2 * sizeof(u16));
3468 	if (error)
3469 		pci_err(dev, "unable to allocate suspend buffer for LTR\n");
3470 
3471 	pci_allocate_vc_save_buffers(dev);
3472 }
3473 
3474 void pci_free_cap_save_buffers(struct pci_dev *dev)
3475 {
3476 	struct pci_cap_saved_state *tmp;
3477 	struct hlist_node *n;
3478 
3479 	hlist_for_each_entry_safe(tmp, n, &dev->saved_cap_space, next)
3480 		kfree(tmp);
3481 }
3482 
3483 /**
3484  * pci_configure_ari - enable or disable ARI forwarding
3485  * @dev: the PCI device
3486  *
3487  * If @dev and its upstream bridge both support ARI, enable ARI in the
3488  * bridge.  Otherwise, disable ARI in the bridge.
3489  */
3490 void pci_configure_ari(struct pci_dev *dev)
3491 {
3492 	u32 cap;
3493 	struct pci_dev *bridge;
3494 
3495 	if (pcie_ari_disabled || !pci_is_pcie(dev) || dev->devfn)
3496 		return;
3497 
3498 	bridge = dev->bus->self;
3499 	if (!bridge)
3500 		return;
3501 
3502 	pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);
3503 	if (!(cap & PCI_EXP_DEVCAP2_ARI))
3504 		return;
3505 
3506 	if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI)) {
3507 		pcie_capability_set_word(bridge, PCI_EXP_DEVCTL2,
3508 					 PCI_EXP_DEVCTL2_ARI);
3509 		bridge->ari_enabled = 1;
3510 	} else {
3511 		pcie_capability_clear_word(bridge, PCI_EXP_DEVCTL2,
3512 					   PCI_EXP_DEVCTL2_ARI);
3513 		bridge->ari_enabled = 0;
3514 	}
3515 }
3516 
3517 static bool pci_acs_flags_enabled(struct pci_dev *pdev, u16 acs_flags)
3518 {
3519 	int pos;
3520 	u16 ctrl;
3521 
3522 	pos = pdev->acs_cap;
3523 	if (!pos)
3524 		return false;
3525 
3526 	/*
3527 	 * Except for egress control, capabilities are either required
3528 	 * or only required if controllable.  Features missing from the
3529 	 * capability field can therefore be assumed as hard-wired enabled.
3530 	 */
3531 	acs_flags &= (pdev->acs_capabilities | PCI_ACS_EC);
3532 
3533 	pci_read_config_word(pdev, pos + PCI_ACS_CTRL, &ctrl);
3534 	return (ctrl & acs_flags) == acs_flags;
3535 }
3536 
3537 /**
3538  * pci_acs_enabled - test ACS against required flags for a given device
3539  * @pdev: device to test
3540  * @acs_flags: required PCI ACS flags
3541  *
3542  * Return true if the device supports the provided flags.  Automatically
3543  * filters out flags that are not implemented on multifunction devices.
3544  *
3545  * Note that this interface checks the effective ACS capabilities of the
3546  * device rather than the actual capabilities.  For instance, most single
3547  * function endpoints are not required to support ACS because they have no
3548  * opportunity for peer-to-peer access.  We therefore return 'true'
3549  * regardless of whether the device exposes an ACS capability.  This makes
3550  * it much easier for callers of this function to ignore the actual type
3551  * or topology of the device when testing ACS support.
3552  */
3553 bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags)
3554 {
3555 	int ret;
3556 
3557 	ret = pci_dev_specific_acs_enabled(pdev, acs_flags);
3558 	if (ret >= 0)
3559 		return ret > 0;
3560 
3561 	/*
3562 	 * Conventional PCI and PCI-X devices never support ACS, either
3563 	 * effectively or actually.  The shared bus topology implies that
3564 	 * any device on the bus can receive or snoop DMA.
3565 	 */
3566 	if (!pci_is_pcie(pdev))
3567 		return false;
3568 
3569 	switch (pci_pcie_type(pdev)) {
3570 	/*
3571 	 * PCI/X-to-PCIe bridges are not specifically mentioned by the spec,
3572 	 * but since their primary interface is PCI/X, we conservatively
3573 	 * handle them as we would a non-PCIe device.
3574 	 */
3575 	case PCI_EXP_TYPE_PCIE_BRIDGE:
3576 	/*
3577 	 * PCIe 3.0, 6.12.1 excludes ACS on these devices.  "ACS is never
3578 	 * applicable... must never implement an ACS Extended Capability...".
3579 	 * This seems arbitrary, but we take a conservative interpretation
3580 	 * of this statement.
3581 	 */
3582 	case PCI_EXP_TYPE_PCI_BRIDGE:
3583 	case PCI_EXP_TYPE_RC_EC:
3584 		return false;
3585 	/*
3586 	 * PCIe 3.0, 6.12.1.1 specifies that downstream and root ports should
3587 	 * implement ACS in order to indicate their peer-to-peer capabilities,
3588 	 * regardless of whether they are single- or multi-function devices.
3589 	 */
3590 	case PCI_EXP_TYPE_DOWNSTREAM:
3591 	case PCI_EXP_TYPE_ROOT_PORT:
3592 		return pci_acs_flags_enabled(pdev, acs_flags);
3593 	/*
3594 	 * PCIe 3.0, 6.12.1.2 specifies ACS capabilities that should be
3595 	 * implemented by the remaining PCIe types to indicate peer-to-peer
3596 	 * capabilities, but only when they are part of a multifunction
3597 	 * device.  The footnote for section 6.12 indicates the specific
3598 	 * PCIe types included here.
3599 	 */
3600 	case PCI_EXP_TYPE_ENDPOINT:
3601 	case PCI_EXP_TYPE_UPSTREAM:
3602 	case PCI_EXP_TYPE_LEG_END:
3603 	case PCI_EXP_TYPE_RC_END:
3604 		if (!pdev->multifunction)
3605 			break;
3606 
3607 		return pci_acs_flags_enabled(pdev, acs_flags);
3608 	}
3609 
3610 	/*
3611 	 * PCIe 3.0, 6.12.1.3 specifies no ACS capabilities are applicable
3612 	 * to single function devices with the exception of downstream ports.
3613 	 */
3614 	return true;
3615 }
3616 
3617 /**
3618  * pci_acs_path_enabled - test ACS flags from start to end in a hierarchy
3619  * @start: starting downstream device
3620  * @end: ending upstream device or NULL to search to the root bus
3621  * @acs_flags: required flags
3622  *
3623  * Walk up a device tree from start to end testing PCI ACS support.  If
3624  * any step along the way does not support the required flags, return false.
3625  */
3626 bool pci_acs_path_enabled(struct pci_dev *start,
3627 			  struct pci_dev *end, u16 acs_flags)
3628 {
3629 	struct pci_dev *pdev, *parent = start;
3630 
3631 	do {
3632 		pdev = parent;
3633 
3634 		if (!pci_acs_enabled(pdev, acs_flags))
3635 			return false;
3636 
3637 		if (pci_is_root_bus(pdev->bus))
3638 			return (end == NULL);
3639 
3640 		parent = pdev->bus->self;
3641 	} while (pdev != end);
3642 
3643 	return true;
3644 }
3645 
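/*
 * Editorial example (illustrative sketch, not part of the original file):
 * a caller such as a peer-to-peer DMA user might use pci_acs_path_enabled()
 * to confirm that traffic from two endpoints is redirected upstream.  The
 * function name and the exact flag choice below are assumptions.
 */
static __maybe_unused bool example_p2p_paths_acs_enabled(struct pci_dev *a,
							 struct pci_dev *b)
{
	u16 flags = PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF;

	/* A NULL @end walks each path all the way to the root bus */
	return pci_acs_path_enabled(a, NULL, flags) &&
	       pci_acs_path_enabled(b, NULL, flags);
}
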
3646 /**
3647  * pci_acs_init - Initialize ACS if hardware supports it
3648  * @dev: the PCI device
3649  */
3650 void pci_acs_init(struct pci_dev *dev)
3651 {
3652 	int pos;
3653 
3654 	dev->acs_cap = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
3655 	pos = dev->acs_cap;
3656 	if (!pos)
3657 		return;
3658 
3659 	pci_read_config_word(dev, pos + PCI_ACS_CAP, &dev->acs_capabilities);
3660 	pci_disable_broken_acs_cap(dev);
3661 }
3662 
3663 /**
3664  * pci_enable_atomic_ops_to_root - enable AtomicOp requests to root port
3665  * @dev: the PCI device
3666  * @cap_mask: mask of desired AtomicOp sizes, including one or more of:
3667  *	PCI_EXP_DEVCAP2_ATOMIC_COMP32
3668  *	PCI_EXP_DEVCAP2_ATOMIC_COMP64
3669  *	PCI_EXP_DEVCAP2_ATOMIC_COMP128
3670  *
3671  * Return 0 if all upstream bridges support AtomicOp routing, egress
3672  * blocking is disabled on all upstream ports, and the root port supports
3673  * the requested completion capabilities (32-bit, 64-bit and/or 128-bit
3674  * AtomicOp completion), or negative otherwise.
3675  */
3676 int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask)
3677 {
3678 	struct pci_bus *bus = dev->bus;
3679 	struct pci_dev *bridge;
3680 	u32 cap, ctl2;
3681 
3682 	/*
3683 	 * Per PCIe r5.0, sec 9.3.5.10, the AtomicOp Requester Enable bit
3684 	 * in Device Control 2 is reserved in VFs and the PF value applies
3685 	 * to all associated VFs.
3686 	 */
3687 	if (dev->is_virtfn)
3688 		return -EINVAL;
3689 
3690 	if (!pci_is_pcie(dev))
3691 		return -EINVAL;
3692 
3693 	/*
3694 	 * Per PCIe r4.0, sec 6.15, endpoints and root ports may be
3695 	 * AtomicOp requesters.  For now, we only support endpoints as
3696 	 * requesters and root ports as completers.  No endpoints as
3697 	 * completers, and no peer-to-peer.
3698 	 */
3699 
3700 	switch (pci_pcie_type(dev)) {
3701 	case PCI_EXP_TYPE_ENDPOINT:
3702 	case PCI_EXP_TYPE_LEG_END:
3703 	case PCI_EXP_TYPE_RC_END:
3704 		break;
3705 	default:
3706 		return -EINVAL;
3707 	}
3708 
3709 	while (bus->parent) {
3710 		bridge = bus->self;
3711 
3712 		pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);
3713 
3714 		switch (pci_pcie_type(bridge)) {
3715 		/* Ensure switch ports support AtomicOp routing */
3716 		case PCI_EXP_TYPE_UPSTREAM:
3717 		case PCI_EXP_TYPE_DOWNSTREAM:
3718 			if (!(cap & PCI_EXP_DEVCAP2_ATOMIC_ROUTE))
3719 				return -EINVAL;
3720 			break;
3721 
3722 		/* Ensure root port supports all the sizes we care about */
3723 		case PCI_EXP_TYPE_ROOT_PORT:
3724 			if ((cap & cap_mask) != cap_mask)
3725 				return -EINVAL;
3726 			break;
3727 		}
3728 
3729 		/* Ensure upstream ports don't block AtomicOps on egress */
3730 		if (pci_pcie_type(bridge) == PCI_EXP_TYPE_UPSTREAM) {
3731 			pcie_capability_read_dword(bridge, PCI_EXP_DEVCTL2,
3732 						   &ctl2);
3733 			if (ctl2 & PCI_EXP_DEVCTL2_ATOMIC_EGRESS_BLOCK)
3734 				return -EINVAL;
3735 		}
3736 
3737 		bus = bus->parent;
3738 	}
3739 
3740 	pcie_capability_set_word(dev, PCI_EXP_DEVCTL2,
3741 				 PCI_EXP_DEVCTL2_ATOMIC_REQ);
3742 	return 0;
3743 }
3744 EXPORT_SYMBOL(pci_enable_atomic_ops_to_root);
3745 
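/*
 * Editorial example (illustrative sketch): an endpoint driver that wants
 * to issue 64-bit AtomicOp requests would typically enable them toward
 * the Root Port during probe and fall back to a non-atomic path if the
 * hierarchy cannot support them.  The function name is hypothetical.
 */
static __maybe_unused void example_enable_atomic_ops(struct pci_dev *pdev)
{
	if (pci_enable_atomic_ops_to_root(pdev,
					  PCI_EXP_DEVCAP2_ATOMIC_COMP64))
		pci_info(pdev, "AtomicOps unavailable, using fallback path\n");
}
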
3746 /**
3747  * pci_release_region - Release a PCI BAR
3748  * @pdev: PCI device whose resources were previously reserved by
3749  *	  pci_request_region()
3750  * @bar: BAR to release
3751  *
3752  * Releases the PCI I/O and memory resources previously reserved by a
3753  * successful call to pci_request_region().  Call this function only
3754  * after all use of the PCI regions has ceased.
3755  */
3756 void pci_release_region(struct pci_dev *pdev, int bar)
3757 {
3758 	if (!pci_bar_index_is_valid(bar))
3759 		return;
3760 
3761 	if (pci_resource_len(pdev, bar) == 0)
3762 		return;
3763 	if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
3764 		release_region(pci_resource_start(pdev, bar),
3765 				pci_resource_len(pdev, bar));
3766 	else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
3767 		release_mem_region(pci_resource_start(pdev, bar),
3768 				pci_resource_len(pdev, bar));
3769 }
3770 EXPORT_SYMBOL(pci_release_region);
3771 
3772 /**
3773  * __pci_request_region - Reserve PCI I/O and memory resource
3774  * @pdev: PCI device whose resources are to be reserved
3775  * @bar: BAR to be reserved
3776  * @name: name of the driver requesting the resource
3777  * @exclusive: whether the region access is exclusive or not
3778  *
3779  * Returns: 0 on success, negative error code on failure.
3780  *
3781  * Mark the PCI region associated with PCI device @pdev BAR @bar as being
3782  * reserved by owner @name. Do not access any address inside the PCI regions
3783  * unless this call returns successfully.
3784  *
3785  * If @exclusive is set, then the region is marked so that userspace
3786  * is explicitly not allowed to map the resource via /dev/mem or
3787  * sysfs MMIO access.
3788  *
3789  * A warning message is printed on failure; the error code in that
3790  * case is -EBUSY.
3791  */
3792 static int __pci_request_region(struct pci_dev *pdev, int bar,
3793 				const char *name, int exclusive)
3794 {
3795 	if (!pci_bar_index_is_valid(bar))
3796 		return -EINVAL;
3797 
3798 	if (pci_resource_len(pdev, bar) == 0)
3799 		return 0;
3800 
3801 	if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
3802 		if (!request_region(pci_resource_start(pdev, bar),
3803 			    pci_resource_len(pdev, bar), name))
3804 			goto err_out;
3805 	} else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
3806 		if (!__request_mem_region(pci_resource_start(pdev, bar),
3807 					pci_resource_len(pdev, bar), name,
3808 					exclusive))
3809 			goto err_out;
3810 	}
3811 
3812 	return 0;
3813 
3814 err_out:
3815 	pci_warn(pdev, "BAR %d: can't reserve %pR\n", bar,
3816 		 &pdev->resource[bar]);
3817 	return -EBUSY;
3818 }
3819 
3820 /**
3821  * pci_request_region - Reserve PCI I/O and memory resource
3822  * @pdev: PCI device whose resources are to be reserved
3823  * @bar: BAR to be reserved
3824  * @name: name of the driver requesting the resource
3825  *
3826  * Returns: 0 on success, negative error code on failure.
3827  *
3828  * Mark the PCI region associated with PCI device @pdev BAR @bar as being
3829  * reserved by owner @name. Do not access any address inside the PCI regions
3830  * unless this call returns successfully.
3831  *
3832  * A warning message is printed on failure; the error code in that
3833  * case is -EBUSY.
3834  */
3835 int pci_request_region(struct pci_dev *pdev, int bar, const char *name)
3836 {
3837 	return __pci_request_region(pdev, bar, name, 0);
3838 }
3839 EXPORT_SYMBOL(pci_request_region);
3840 
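/*
 * Editorial example (illustrative sketch): claim a single BAR and map
 * it, releasing the region again if the mapping fails.  The function
 * and owner names are hypothetical.
 */
static __maybe_unused void __iomem *example_map_bar0(struct pci_dev *pdev)
{
	void __iomem *regs;

	if (pci_request_region(pdev, 0, "example"))
		return NULL;

	regs = pci_ioremap_bar(pdev, 0);
	if (!regs)
		pci_release_region(pdev, 0);

	return regs;
}
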
3841 /**
3842  * pci_release_selected_regions - Release selected PCI I/O and memory resources
3843  * @pdev: PCI device whose resources were previously reserved
3844  * @bars: Bitmask of BARs to be released
3845  *
3846  * Release selected PCI I/O and memory resources previously reserved.
3847  * Call this function only after all use of the PCI regions has ceased.
3848  */
3849 void pci_release_selected_regions(struct pci_dev *pdev, int bars)
3850 {
3851 	int i;
3852 
3853 	for (i = 0; i < PCI_STD_NUM_BARS; i++)
3854 		if (bars & (1 << i))
3855 			pci_release_region(pdev, i);
3856 }
3857 EXPORT_SYMBOL(pci_release_selected_regions);
3858 
3859 static int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
3860 					  const char *name, int excl)
3861 {
3862 	int i;
3863 
3864 	for (i = 0; i < PCI_STD_NUM_BARS; i++)
3865 		if (bars & (1 << i))
3866 			if (__pci_request_region(pdev, i, name, excl))
3867 				goto err_out;
3868 	return 0;
3869 
3870 err_out:
3871 	while (--i >= 0)
3872 		if (bars & (1 << i))
3873 			pci_release_region(pdev, i);
3874 
3875 	return -EBUSY;
3876 }
3877 
3878 
3879 /**
3880  * pci_request_selected_regions - Reserve selected PCI I/O and memory resources
3881  * @pdev: PCI device whose resources are to be reserved
3882  * @bars: Bitmask of BARs to be requested
3883  * @name: Name of the driver requesting the resources
3884  *
3885  * Returns: 0 on success, negative error code on failure.
3886  */
3887 int pci_request_selected_regions(struct pci_dev *pdev, int bars,
3888 				 const char *name)
3889 {
3890 	return __pci_request_selected_regions(pdev, bars, name, 0);
3891 }
3892 EXPORT_SYMBOL(pci_request_selected_regions);
3893 
3894 /**
3895  * pci_request_selected_regions_exclusive - Request regions exclusively
3896  * @pdev: PCI device to request regions from
3897  * @bars: bit mask of BARs to request
3898  * @name: name of the driver requesting the resources
3899  *
3900  * Returns: 0 on success, negative error code on failure.
3901  */
3902 int pci_request_selected_regions_exclusive(struct pci_dev *pdev, int bars,
3903 					   const char *name)
3904 {
3905 	return __pci_request_selected_regions(pdev, bars, name,
3906 			IORESOURCE_EXCLUSIVE);
3907 }
3908 EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
3909 
3910 /**
3911  * pci_release_regions - Release reserved PCI I/O and memory resources
3912  * @pdev: PCI device whose resources were previously reserved by
3913  *	  pci_request_regions()
3914  *
3915  * Releases all PCI I/O and memory resources previously reserved by a
3916  * successful call to pci_request_regions().  Call this function only
3917  * after all use of the PCI regions has ceased.
3918  */
3919 void pci_release_regions(struct pci_dev *pdev)
3920 {
3921 	pci_release_selected_regions(pdev, (1 << PCI_STD_NUM_BARS) - 1);
3922 }
3923 EXPORT_SYMBOL(pci_release_regions);
3924 
3925 /**
3926  * pci_request_regions - Reserve PCI I/O and memory resources
3927  * @pdev: PCI device whose resources are to be reserved
3928  * @name: name of the driver requesting the resources
3929  *
3930  * Mark all PCI regions associated with PCI device @pdev as being reserved by
3931  * owner @name. Do not access any address inside the PCI regions unless this
3932  * call returns successfully.
3933  *
3934  * Returns 0 on success, or -EBUSY on error.  A warning
3935  * message is also printed on failure.
3936  */
3937 int pci_request_regions(struct pci_dev *pdev, const char *name)
3938 {
3939 	return pci_request_selected_regions(pdev,
3940 			((1 << PCI_STD_NUM_BARS) - 1), name);
3941 }
3942 EXPORT_SYMBOL(pci_request_regions);
3943 
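/*
 * Editorial example (illustrative sketch): the usual probe/remove
 * pairing.  Everything claimed by pci_request_regions() during probe is
 * returned by pci_release_regions() once all use has ceased.  The
 * function and driver names are hypothetical.
 */
static __maybe_unused int example_probe(struct pci_dev *pdev)
{
	int rc;

	rc = pci_enable_device(pdev);
	if (rc)
		return rc;

	rc = pci_request_regions(pdev, "example");
	if (rc)
		pci_disable_device(pdev);

	return rc;
}

static __maybe_unused void example_remove(struct pci_dev *pdev)
{
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}
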
3944 /**
3945  * pci_request_regions_exclusive - Reserve PCI I/O and memory resources
3946  * @pdev: PCI device whose resources are to be reserved
3947  * @name: name of the driver requesting the resources
3948  *
3949  * Returns: 0 on success, negative error code on failure.
3950  *
3951  * Mark all PCI regions associated with PCI device @pdev as being reserved
3952  * by owner @name. Do not access any address inside the PCI regions
3953  * unless this call returns successfully.
3954  *
3955  * pci_request_regions_exclusive() will mark the region so that /dev/mem
3956  * and the sysfs MMIO access will not be allowed.
3957  *
3958  * A warning message is printed on failure; the error code in that
3959  * case is -EBUSY.
3960  */
3961 int pci_request_regions_exclusive(struct pci_dev *pdev, const char *name)
3962 {
3963 	return pci_request_selected_regions_exclusive(pdev,
3964 				((1 << PCI_STD_NUM_BARS) - 1), name);
3965 }
3966 EXPORT_SYMBOL(pci_request_regions_exclusive);
3967 
3968 /*
3969  * Record the PCI IO range (expressed as CPU physical address + size).
3970  * Return a negative value if an error has occurred, zero otherwise
3971  */
3972 int pci_register_io_range(const struct fwnode_handle *fwnode, phys_addr_t addr,
3973 			resource_size_t	size)
3974 {
3975 	int ret = 0;
3976 #ifdef PCI_IOBASE
3977 	struct logic_pio_hwaddr *range;
3978 
3979 	if (!size || addr + size < addr)
3980 		return -EINVAL;
3981 
3982 	range = kzalloc(sizeof(*range), GFP_ATOMIC);
3983 	if (!range)
3984 		return -ENOMEM;
3985 
3986 	range->fwnode = fwnode;
3987 	range->size = size;
3988 	range->hw_start = addr;
3989 	range->flags = LOGIC_PIO_CPU_MMIO;
3990 
3991 	ret = logic_pio_register_range(range);
3992 	if (ret)
3993 		kfree(range);
3994 
3995 	/* Ignore duplicates due to deferred probing */
3996 	if (ret == -EEXIST)
3997 		ret = 0;
3998 #endif
3999 
4000 	return ret;
4001 }
4002 
4003 phys_addr_t pci_pio_to_address(unsigned long pio)
4004 {
4005 #ifdef PCI_IOBASE
4006 	if (pio < MMIO_UPPER_LIMIT)
4007 		return logic_pio_to_hwaddr(pio);
4008 #endif
4009 
4010 	return (phys_addr_t) OF_BAD_ADDR;
4011 }
4012 EXPORT_SYMBOL_GPL(pci_pio_to_address);
4013 
4014 unsigned long __weak pci_address_to_pio(phys_addr_t address)
4015 {
4016 #ifdef PCI_IOBASE
4017 	return logic_pio_trans_cpuaddr(address);
4018 #else
4019 	if (address > IO_SPACE_LIMIT)
4020 		return (unsigned long)-1;
4021 
4022 	return (unsigned long) address;
4023 #endif
4024 }
4025 
4026 /**
4027  * pci_remap_iospace - Remap the memory mapped I/O space
4028  * @res: Resource describing the I/O space
4029  * @phys_addr: physical address of range to be mapped
4030  *
4031  * Remap the memory mapped I/O space described by @res and the CPU
4032  * physical address @phys_addr into virtual address space.  Only
4033  * architectures that have memory mapped IO functions defined (and the
4034  * PCI_IOBASE value defined) should call this function.
4035  */
4036 #ifndef pci_remap_iospace
4037 int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
4038 {
4039 #if defined(PCI_IOBASE)
4040 	unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
4041 
4042 	if (!(res->flags & IORESOURCE_IO))
4043 		return -EINVAL;
4044 
4045 	if (res->end > IO_SPACE_LIMIT)
4046 		return -EINVAL;
4047 
4048 	return vmap_page_range(vaddr, vaddr + resource_size(res), phys_addr,
4049 			       pgprot_device(PAGE_KERNEL));
4050 #else
4051 	/*
4052 	 * This architecture does not have memory mapped I/O space,
4053 	 * so this function should never be called
4054 	 */
4055 	WARN_ONCE(1, "This architecture does not support memory mapped I/O\n");
4056 	return -ENODEV;
4057 #endif
4058 }
4059 EXPORT_SYMBOL(pci_remap_iospace);
4060 #endif
4061 
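/*
 * Editorial example (illustrative sketch): a host bridge driver with an
 * IORESOURCE_IO window, typically taken from the firmware description,
 * would map it into the PCI_IOBASE area before scanning the bus.  The
 * function name and the origin of @io_res/@cpu_addr are assumptions.
 */
static __maybe_unused int example_map_io_window(struct resource *io_res,
						phys_addr_t cpu_addr)
{
	int err;

	err = pci_remap_iospace(io_res, cpu_addr);
	if (err)
		pr_err("failed to remap I/O window %pR: %d\n", io_res, err);

	return err;
}
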
4062 /**
4063  * pci_unmap_iospace - Unmap the memory mapped I/O space
4064  * @res: resource to be unmapped
4065  *
4066  * Unmap the CPU virtual address @res from virtual address space.  Only
4067  * architectures that have memory mapped IO functions defined (and the
4068  * PCI_IOBASE value defined) should call this function.
4069  */
4070 void pci_unmap_iospace(struct resource *res)
4071 {
4072 #if defined(PCI_IOBASE)
4073 	unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
4074 
4075 	vunmap_range(vaddr, vaddr + resource_size(res));
4076 #endif
4077 }
4078 EXPORT_SYMBOL(pci_unmap_iospace);
4079 
4080 static void __pci_set_master(struct pci_dev *dev, bool enable)
4081 {
4082 	u16 old_cmd, cmd;
4083 
4084 	pci_read_config_word(dev, PCI_COMMAND, &old_cmd);
4085 	if (enable)
4086 		cmd = old_cmd | PCI_COMMAND_MASTER;
4087 	else
4088 		cmd = old_cmd & ~PCI_COMMAND_MASTER;
4089 	if (cmd != old_cmd) {
4090 		pci_dbg(dev, "%s bus mastering\n",
4091 			enable ? "enabling" : "disabling");
4092 		pci_write_config_word(dev, PCI_COMMAND, cmd);
4093 	}
4094 	dev->is_busmaster = enable;
4095 }
4096 
4097 /**
4098  * pcibios_setup - process "pci=" kernel boot arguments
4099  * @str: string used to pass in "pci=" kernel boot arguments
4100  *
4101  * Process kernel boot arguments.  This is the default implementation.
4102  * Architecture specific implementations can override this as necessary.
4103  */
4104 char * __weak __init pcibios_setup(char *str)
4105 {
4106 	return str;
4107 }
4108 
4109 /**
4110  * pcibios_set_master - enable PCI bus-mastering for device dev
4111  * @dev: the PCI device to enable
4112  *
4113  * Enables PCI bus-mastering for the device.  This is the default
4114  * implementation.  Architecture specific implementations can override
4115  * this if necessary.
4116  */
4117 void __weak pcibios_set_master(struct pci_dev *dev)
4118 {
4119 	u8 lat;
4120 
4121 	/* The latency timer doesn't apply to PCIe (either Type 0 or Type 1) */
4122 	if (pci_is_pcie(dev))
4123 		return;
4124 
4125 	pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
4126 	if (lat < 16)
4127 		lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency;
4128 	else if (lat > pcibios_max_latency)
4129 		lat = pcibios_max_latency;
4130 	else
4131 		return;
4132 
4133 	pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
4134 }
4135 
4136 /**
4137  * pci_set_master - enables bus-mastering for device dev
4138  * @dev: the PCI device to enable
4139  *
4140  * Enables bus-mastering on the device and calls pcibios_set_master()
4141  * to do the needed arch specific settings.
4142  */
4143 void pci_set_master(struct pci_dev *dev)
4144 {
4145 	__pci_set_master(dev, true);
4146 	pcibios_set_master(dev);
4147 }
4148 EXPORT_SYMBOL(pci_set_master);
4149 
4150 /**
4151  * pci_clear_master - disables bus-mastering for device dev
4152  * @dev: the PCI device to disable
4153  */
4154 void pci_clear_master(struct pci_dev *dev)
4155 {
4156 	__pci_set_master(dev, false);
4157 }
4158 EXPORT_SYMBOL(pci_clear_master);
4159 
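/*
 * Editorial example (illustrative sketch): bus mastering must be on
 * before the device may DMA (including MSI/MSI-X), and is commonly
 * paired with configuring the DMA mask.  Assumes <linux/dma-mapping.h>
 * and a device capable of 64-bit addressing; names are hypothetical.
 */
static __maybe_unused int example_setup_dma(struct pci_dev *pdev)
{
	int rc;

	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (rc)
		return rc;

	pci_set_master(pdev);
	return 0;
}
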
4160 /**
4161  * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed
4162  * @dev: the PCI device for which MWI is to be enabled
4163  *
4164  * Helper function for pci_set_mwi.
4165  * Originally copied from drivers/net/acenic.c.
4166  * Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>.
4167  *
4168  * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
4169  */
4170 int pci_set_cacheline_size(struct pci_dev *dev)
4171 {
4172 	u8 cacheline_size;
4173 
4174 	if (!pci_cache_line_size)
4175 		return -EINVAL;
4176 
4177 	/* Validate current setting: the PCI_CACHE_LINE_SIZE must be
4178 	   equal to or a multiple of the right value. */
4179 	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
4180 	if (cacheline_size >= pci_cache_line_size &&
4181 	    (cacheline_size % pci_cache_line_size) == 0)
4182 		return 0;
4183 
4184 	/* Write the correct value. */
4185 	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
4186 	/* Read it back. */
4187 	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
4188 	if (cacheline_size == pci_cache_line_size)
4189 		return 0;
4190 
4191 	pci_dbg(dev, "cache line size of %d is not supported\n",
4192 		   pci_cache_line_size << 2);
4193 
4194 	return -EINVAL;
4195 }
4196 EXPORT_SYMBOL_GPL(pci_set_cacheline_size);
4197 
4198 /**
4199  * pci_set_mwi - enables memory-write-invalidate PCI transaction
4200  * @dev: the PCI device for which MWI is enabled
4201  *
4202  * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
4203  *
4204  * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
4205  */
4206 int pci_set_mwi(struct pci_dev *dev)
4207 {
4208 #ifdef PCI_DISABLE_MWI
4209 	return 0;
4210 #else
4211 	int rc;
4212 	u16 cmd;
4213 
4214 	rc = pci_set_cacheline_size(dev);
4215 	if (rc)
4216 		return rc;
4217 
4218 	pci_read_config_word(dev, PCI_COMMAND, &cmd);
4219 	if (!(cmd & PCI_COMMAND_INVALIDATE)) {
4220 		pci_dbg(dev, "enabling Mem-Wr-Inval\n");
4221 		cmd |= PCI_COMMAND_INVALIDATE;
4222 		pci_write_config_word(dev, PCI_COMMAND, cmd);
4223 	}
4224 	return 0;
4225 #endif
4226 }
4227 EXPORT_SYMBOL(pci_set_mwi);
4228 
4229 /**
4230  * pci_try_set_mwi - enables memory-write-invalidate PCI transaction
4231  * @dev: the PCI device for which MWI is enabled
4232  *
4233  * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
4234  * Callers are not required to check the return value.
4235  *
4236  * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
4237  */
4238 int pci_try_set_mwi(struct pci_dev *dev)
4239 {
4240 #ifdef PCI_DISABLE_MWI
4241 	return 0;
4242 #else
4243 	return pci_set_mwi(dev);
4244 #endif
4245 }
4246 EXPORT_SYMBOL(pci_try_set_mwi);
4247 
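/*
 * Editorial example (illustrative sketch): MWI is a conventional-PCI
 * optimization and compiles to a no-op where PCI_DISABLE_MWI is set, so
 * drivers normally opt in opportunistically via pci_try_set_mwi() and
 * ignore the result.  The function name is hypothetical.
 */
static __maybe_unused void example_opt_in_mwi(struct pci_dev *pdev)
{
	pci_try_set_mwi(pdev);	/* best effort, failure is harmless */
}
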
4248 /**
4249  * pci_clear_mwi - disables Memory-Write-Invalidate for device dev
4250  * @dev: the PCI device to disable
4251  *
4252  * Disables PCI Memory-Write-Invalidate transaction on the device
4253  */
4254 void pci_clear_mwi(struct pci_dev *dev)
4255 {
4256 #ifndef PCI_DISABLE_MWI
4257 	u16 cmd;
4258 
4259 	pci_read_config_word(dev, PCI_COMMAND, &cmd);
4260 	if (cmd & PCI_COMMAND_INVALIDATE) {
4261 		cmd &= ~PCI_COMMAND_INVALIDATE;
4262 		pci_write_config_word(dev, PCI_COMMAND, cmd);
4263 	}
4264 #endif
4265 }
4266 EXPORT_SYMBOL(pci_clear_mwi);
4267 
4268 /**
4269  * pci_disable_parity - disable parity checking for device
4270  * @dev: the PCI device to operate on
4271  *
4272  * Disable parity checking for device @dev
4273  */
4274 void pci_disable_parity(struct pci_dev *dev)
4275 {
4276 	u16 cmd;
4277 
4278 	pci_read_config_word(dev, PCI_COMMAND, &cmd);
4279 	if (cmd & PCI_COMMAND_PARITY) {
4280 		cmd &= ~PCI_COMMAND_PARITY;
4281 		pci_write_config_word(dev, PCI_COMMAND, cmd);
4282 	}
4283 }
4284 
4285 /**
4286  * pci_intx - enables/disables PCI INTx for device dev
4287  * @pdev: the PCI device to operate on
4288  * @enable: boolean: whether to enable or disable PCI INTx
4289  *
4290  * Enables/disables PCI INTx for device @pdev
4291  */
4292 void pci_intx(struct pci_dev *pdev, int enable)
4293 {
4294 	u16 pci_command, new;
4295 
4296 	pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
4297 
4298 	if (enable)
4299 		new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
4300 	else
4301 		new = pci_command | PCI_COMMAND_INTX_DISABLE;
4302 
4303 	if (new == pci_command)
4304 		return;
4305 
4306 	pci_write_config_word(pdev, PCI_COMMAND, new);
4307 }
4308 EXPORT_SYMBOL_GPL(pci_intx);
4309 
4310 /**
4311  * pci_wait_for_pending_transaction - wait for pending transaction
4312  * @dev: the PCI device to operate on
4313  *
4314  * Return 0 if a transaction is pending, 1 otherwise.
4315  */
4316 int pci_wait_for_pending_transaction(struct pci_dev *dev)
4317 {
4318 	if (!pci_is_pcie(dev))
4319 		return 1;
4320 
4321 	return pci_wait_for_pending(dev, pci_pcie_cap(dev) + PCI_EXP_DEVSTA,
4322 				    PCI_EXP_DEVSTA_TRPND);
4323 }
4324 EXPORT_SYMBOL(pci_wait_for_pending_transaction);
4325 
4326 /**
4327  * pcie_flr - initiate a PCIe function level reset
4328  * @dev: device to reset
4329  *
4330  * Initiate a function level reset unconditionally on @dev without
4331  * checking any flags or DEVCAP.
4332  */
4333 int pcie_flr(struct pci_dev *dev)
4334 {
4335 	int ret;
4336 
4337 	if (!pci_wait_for_pending_transaction(dev))
4338 		pci_err(dev, "timed out waiting for pending transaction; performing function level reset anyway\n");
4339 
4340 	/* Have to call it after waiting for pending DMA transaction */
4341 	ret = pci_dev_reset_iommu_prepare(dev);
4342 	if (ret) {
4343 		pci_err(dev, "failed to stop IOMMU for a PCI reset: %d\n", ret);
4344 		return ret;
4345 	}
4346 
4347 	pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
4348 
4349 	if (dev->imm_ready)
4350 		goto done;
4351 
4352 	/*
4353 	 * Per PCIe r4.0, sec 6.6.2, a device must complete an FLR within
4354 	 * 100ms, but may silently discard requests while the FLR is in
4355 	 * progress.  Wait 100ms before trying to access the device.
4356 	 */
4357 	msleep(100);
4358 
4359 	ret = pci_dev_wait(dev, "FLR", PCIE_RESET_READY_POLL_MS);
4360 done:
4361 	pci_dev_reset_iommu_done(dev);
4362 	return ret;
4363 }
4364 EXPORT_SYMBOL_GPL(pcie_flr);
4365 
4366 /**
4367  * pcie_reset_flr - initiate a PCIe function level reset
4368  * @dev: device to reset
4369  * @probe: if true, return 0 if device can be reset this way
4370  *
4371  * Initiate a function level reset on @dev.
4372  */
4373 int pcie_reset_flr(struct pci_dev *dev, bool probe)
4374 {
4375 	if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET)
4376 		return -ENOTTY;
4377 
4378 	if (!(dev->devcap & PCI_EXP_DEVCAP_FLR))
4379 		return -ENOTTY;
4380 
4381 	if (probe)
4382 		return 0;
4383 
4384 	return pcie_flr(dev);
4385 }
4386 EXPORT_SYMBOL_GPL(pcie_reset_flr);
4387 
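/*
 * Editorial example (illustrative sketch): probe for FLR support first,
 * then perform the reset.  With @probe true the call only reports
 * whether this method is usable and has no side effects.  The function
 * name is hypothetical.
 */
static __maybe_unused int example_flr(struct pci_dev *pdev)
{
	int rc;

	rc = pcie_reset_flr(pdev, PCI_RESET_PROBE);
	if (rc)
		return rc;	/* FLR not supported on this function */

	return pcie_reset_flr(pdev, PCI_RESET_DO_RESET);
}
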
4388 static int pci_af_flr(struct pci_dev *dev, bool probe)
4389 {
4390 	int ret;
4391 	int pos;
4392 	u8 cap;
4393 
4394 	pos = pci_find_capability(dev, PCI_CAP_ID_AF);
4395 	if (!pos)
4396 		return -ENOTTY;
4397 
4398 	if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET)
4399 		return -ENOTTY;
4400 
4401 	pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap);
4402 	if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
4403 		return -ENOTTY;
4404 
4405 	if (probe)
4406 		return 0;
4407 
4408 	/*
4409 	 * Wait for Transaction Pending bit to clear.  A word-aligned test
4410 	 * is used, so we use the control offset rather than status and shift
4411 	 * the test bit to match.
4412 	 */
4413 	if (!pci_wait_for_pending(dev, pos + PCI_AF_CTRL,
4414 				 PCI_AF_STATUS_TP << 8))
4415 		pci_err(dev, "timed out waiting for pending transaction; performing AF function level reset anyway\n");
4416 
4417 	/* Have to call it after waiting for pending DMA transaction */
4418 	ret = pci_dev_reset_iommu_prepare(dev);
4419 	if (ret) {
4420 		pci_err(dev, "failed to stop IOMMU for a PCI reset: %d\n", ret);
4421 		return ret;
4422 	}
4423 
4424 	pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
4425 
4426 	if (dev->imm_ready)
4427 		goto done;
4428 
4429 	/*
4430 	 * Per Advanced Capabilities for Conventional PCI ECN, 13 April 2006,
4431 	 * updated 27 July 2006; a device must complete an FLR within
4432 	 * 100ms, but may silently discard requests while the FLR is in
4433 	 * progress.  Wait 100ms before trying to access the device.
4434 	 */
4435 	msleep(100);
4436 
4437 	ret = pci_dev_wait(dev, "AF_FLR", PCIE_RESET_READY_POLL_MS);
4438 done:
4439 	pci_dev_reset_iommu_done(dev);
4440 	return ret;
4441 }
4442 
4443 /**
4444  * pci_pm_reset - Put device into PCI_D3 and back into PCI_D0.
4445  * @dev: Device to reset.
4446  * @probe: if true, return 0 if the device can be reset this way.
4447  *
4448  * If @dev supports native PCI PM and its PCI_PM_CTRL_NO_SOFT_RESET flag is
4449  * unset, it will be reinitialized internally when going from PCI_D3hot to
4450  * PCI_D0.  If that's the case and the device is not in a low-power state
4451  * already, force it into PCI_D3hot and back to PCI_D0, causing it to be reset.
4452  *
4453  * NOTE: This causes the caller to sleep for twice the device power transition
4454  * cooldown period, which for the D0->D3hot and D3hot->D0 transitions is 10 ms
4455  * by default (i.e. unless the @dev's d3hot_delay field has a different value).
4456  * Moreover, only devices in D0 can be reset by this function.
4457  */
4458 static int pci_pm_reset(struct pci_dev *dev, bool probe)
4459 {
4460 	u16 csr;
4461 	int ret;
4462 
4463 	if (!dev->pm_cap || dev->dev_flags & PCI_DEV_FLAGS_NO_PM_RESET)
4464 		return -ENOTTY;
4465 
4466 	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr);
4467 	if (csr & PCI_PM_CTRL_NO_SOFT_RESET)
4468 		return -ENOTTY;
4469 
4470 	if (probe)
4471 		return 0;
4472 
4473 	if (dev->current_state != PCI_D0)
4474 		return -EINVAL;
4475 
4476 	ret = pci_dev_reset_iommu_prepare(dev);
4477 	if (ret) {
4478 		pci_err(dev, "failed to stop IOMMU for a PCI reset: %d\n", ret);
4479 		return ret;
4480 	}
4481 
4482 	csr &= ~PCI_PM_CTRL_STATE_MASK;
4483 	csr |= PCI_D3hot;
4484 	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
4485 	pci_dev_d3_sleep(dev);
4486 
4487 	csr &= ~PCI_PM_CTRL_STATE_MASK;
4488 	csr |= PCI_D0;
4489 	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
4490 	pci_dev_d3_sleep(dev);
4491 
4492 	ret = pci_dev_wait(dev, "PM D3hot->D0", PCIE_RESET_READY_POLL_MS);
4493 	pci_dev_reset_iommu_done(dev);
4494 	return ret;
4495 }
4496 
4497 /**
4498  * pcie_wait_for_link_status - Wait for link status change
4499  * @pdev: Device whose link to wait for.
4500  * @use_lt: Use the LT bit if TRUE, or the DLLLA bit if FALSE.
4501  * @active: Waiting for active or inactive?
4502  *
4503  * Return 0 if successful, or -ETIMEDOUT if status has not changed within
4504  * PCIE_LINK_RETRAIN_TIMEOUT_MS milliseconds.
4505  */
4506 static int pcie_wait_for_link_status(struct pci_dev *pdev,
4507 				     bool use_lt, bool active)
4508 {
4509 	u16 lnksta_mask, lnksta_match;
4510 	unsigned long end_jiffies;
4511 	u16 lnksta;
4512 
4513 	lnksta_mask = use_lt ? PCI_EXP_LNKSTA_LT : PCI_EXP_LNKSTA_DLLLA;
4514 	lnksta_match = active ? lnksta_mask : 0;
4515 
4516 	end_jiffies = jiffies + msecs_to_jiffies(PCIE_LINK_RETRAIN_TIMEOUT_MS);
4517 	do {
4518 		pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnksta);
4519 		if ((lnksta & lnksta_mask) == lnksta_match)
4520 			return 0;
4521 		msleep(1);
4522 	} while (time_before(jiffies, end_jiffies));
4523 
4524 	return -ETIMEDOUT;
4525 }
4526 
4527 /**
4528  * pcie_retrain_link - Request a link retrain and wait for it to complete
4529  * @pdev: Device whose link to retrain.
4530  * @use_lt: Use the LT bit if TRUE, or the DLLLA bit if FALSE, for status.
4531  *
4532  * Trigger retraining of the PCIe Link and wait for the completion of the
4533  * retraining. As link retraining is known to asserts LBMS and may change
4534  * the Link Speed, LBMS is cleared after the retraining and the Link Speed
4535  * of the subordinate bus is updated.
4536  *
4537  * Retrain completion status is retrieved from the Link Status Register
4538  * according to @use_lt.  It is not verified whether the use of the DLLLA
4539  * bit is valid.
4540  *
4541  * Return 0 if successful, or -ETIMEDOUT if training has not completed
4542  * within PCIE_LINK_RETRAIN_TIMEOUT_MS milliseconds.
4543  */
4544 int pcie_retrain_link(struct pci_dev *pdev, bool use_lt)
4545 {
4546 	int rc;
4547 
4548 	/*
4549 	 * Ensure the updated LNKCTL parameters are used during link
4550 	 * training by checking that there is no ongoing link training that
4551 	 * may have started before link parameters were changed, so as to
4552 	 * avoid LTSSM race as recommended in Implementation Note at the end
4553 	 * of PCIe r6.1 sec 7.5.3.7.
4554 	 */
4555 	rc = pcie_wait_for_link_status(pdev, true, false);
4556 	if (rc)
4557 		return rc;
4558 
4559 	pcie_capability_set_word(pdev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_RL);
4560 	if (pdev->clear_retrain_link) {
4561 		/*
4562 		 * Due to an erratum in some devices the Retrain Link bit
4563 		 * needs to be cleared again manually to allow the link
4564 		 * training to succeed.
4565 		 */
4566 		pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_RL);
4567 	}
4568 
4569 	rc = pcie_wait_for_link_status(pdev, use_lt, !use_lt);
4570 
4571 	/*
4572 	 * Clear LBMS after a manual retrain so that the bit can be used
4573 	 * to track link speed or width changes made by hardware itself
4574 	 * in an attempt to correct unreliable link operation.
4575 	 */
4576 	pcie_reset_lbms(pdev);
4577 
4578 	/*
4579 	 * Ensure the Link Speed updates after retraining in case the Link
4580 	 * Speed was changed because of the retraining. While the bwctrl's
4581 	 * IRQ handler normally picks up the new Link Speed, clearing LBMS
4582 	 * races with the IRQ handler reading the Link Status register and
4583 	 * can result in the handler returning early without updating the
4584 	 * Link Speed.
4585 	 */
4586 	if (pdev->subordinate)
4587 		pcie_update_link_speed(pdev->subordinate, PCIE_LINK_RETRAIN);
4588 
4589 	return rc;
4590 }
4591 
4592 /**
4593  * pcie_wait_for_link_delay - Wait until link is active or inactive
4594  * @pdev: Bridge device
4595  * @active: waiting for active or inactive?
4596  * @delay: Delay to wait after link has become active (in ms)
4597  *
4598  * Use this to wait till link becomes active or inactive.
4599  */
4600 static bool pcie_wait_for_link_delay(struct pci_dev *pdev, bool active,
4601 				     int delay)
4602 {
4603 	int rc;
4604 
4605 	/*
4606 	 * Some controllers might not implement link active reporting. In this
4607 	 * case, we wait for 1000 ms + any delay requested by the caller.
4608 	 */
4609 	if (!pdev->link_active_reporting) {
4610 		msleep(PCIE_LINK_RETRAIN_TIMEOUT_MS + delay);
4611 		return true;
4612 	}
4613 
4614 	/*
4615 	 * Per PCIe r4.0 sec 6.6.1, a component must enter LTSSM Detect within
4616 	 * 20ms, after which we should expect the link to be active if the reset
4617 	 * was successful. If so, software must wait a minimum of 100ms before
4618 	 * sending configuration requests to devices downstream of this port.
4619 	 *
4620 	 * If the link fails to activate, either the device was physically
4621 	 * removed or the link is permanently failed.
4622 	 */
4623 	if (active)
4624 		msleep(20);
4625 	rc = pcie_wait_for_link_status(pdev, false, active);
4626 	if (active) {
4627 		if (rc)
4628 			rc = pcie_failed_link_retrain(pdev);
4629 		if (rc)
4630 			return false;
4631 
4632 		msleep(delay);
4633 		return true;
4634 	}
4635 
4636 	if (rc)
4637 		return false;
4638 
4639 	return true;
4640 }
4641 
4642 /**
4643  * pcie_wait_for_link - Wait until link is active or inactive
4644  * @pdev: Bridge device
4645  * @active: waiting for active or inactive?
4646  *
4647  * Use this to wait till link becomes active or inactive.
4648  */
4649 bool pcie_wait_for_link(struct pci_dev *pdev, bool active)
4650 {
4651 	return pcie_wait_for_link_delay(pdev, active, 100);
4652 }
4653 
4654 /*
4655  * Find maximum D3cold delay required by all the devices on the bus.  The
4656  * spec says 100 ms, but firmware can lower it and we allow drivers to
4657  * increase it as well.
4658  *
4659  * Context: Called with @pci_bus_sem locked for reading.
4660  */
4661 static int pci_bus_max_d3cold_delay(const struct pci_bus *bus)
4662 {
4663 	const struct pci_dev *pdev;
4664 	int min_delay = 100;
4665 	int max_delay = 0;
4666 
4667 	lockdep_assert_held(&pci_bus_sem);
4668 
4669 	list_for_each_entry(pdev, &bus->devices, bus_list) {
4670 		if (pdev->d3cold_delay < min_delay)
4671 			min_delay = pdev->d3cold_delay;
4672 		if (pdev->d3cold_delay > max_delay)
4673 			max_delay = pdev->d3cold_delay;
4674 	}
4675 
4676 	return max(min_delay, max_delay);
4677 }
4678 
4679 /**
4680  * pci_bridge_wait_for_secondary_bus - Wait for secondary bus to be accessible
4681  * @dev: PCI bridge
4682  * @reset_type: reset type in human-readable form
4683  *
4684  * Handle necessary delays before access to the devices on the secondary
4685  * side of the bridge are permitted after D3cold to D0 transition
4686  * or Conventional Reset.
4687  *
4688  * For PCIe this means the delays in PCIe 5.0 section 6.6.1. For
4689  * conventional PCI it means Tpvrh + Trhfa specified in PCI 3.0 section
4690  * 4.3.2.
4691  *
4692  * Return 0 on success or -ENOTTY if the first device on the secondary bus
4693  * failed to become accessible.
4694  */
4695 int pci_bridge_wait_for_secondary_bus(struct pci_dev *dev, char *reset_type)
4696 {
4697 	struct pci_dev *child __free(pci_dev_put) = NULL;
4698 	int delay;
4699 
4700 	if (pci_dev_is_disconnected(dev))
4701 		return 0;
4702 
4703 	if (!pci_is_bridge(dev))
4704 		return 0;
4705 
4706 	down_read(&pci_bus_sem);
4707 
4708 	/*
4709 	 * We only deal with devices that are present currently on the bus.
4710 	 * For any hot-added devices the access delay is handled in pciehp
4711 	 * board_added(). In case of ACPI hotplug the firmware is expected
4712 	 * to configure the devices before OS is notified.
4713 	 */
4714 	if (!dev->subordinate || list_empty(&dev->subordinate->devices)) {
4715 		up_read(&pci_bus_sem);
4716 		return 0;
4717 	}
4718 
4719 	/* Take d3cold_delay requirements into account */
4720 	delay = pci_bus_max_d3cold_delay(dev->subordinate);
4721 	if (!delay) {
4722 		up_read(&pci_bus_sem);
4723 		return 0;
4724 	}
4725 
4726 	child = pci_dev_get(list_first_entry(&dev->subordinate->devices,
4727 					     struct pci_dev, bus_list));
4728 	up_read(&pci_bus_sem);
4729 
4730 	/*
4731 	 * For conventional PCI and PCI-X we need to wait Tpvrh + Trhfa before
4732 	 * accessing the device after reset (that is 1000 ms + 100 ms).
4733 	 */
4734 	if (!pci_is_pcie(dev)) {
4735 		pci_dbg(dev, "waiting %d ms for secondary bus\n", 1000 + delay);
4736 		msleep(1000 + delay);
4737 		return 0;
4738 	}
4739 
4740 	/*
4741 	 * PCIe downstream and root ports that do not support speeds greater
4742 	 * than 5 GT/s need to wait a minimum of 100 ms. For higher speeds
4743 	 * (gen3) we need to wait first for the data link layer to become
4744 	 * active.
4745 	 *
4746 	 * However, 100 ms is the minimum and the PCIe spec says the
4747 	 * software must allow at least 1 s before it can determine that the
4748 	 * device that did not respond is a broken device. The device can also
4749 	 * take longer than that to respond if it indicates so through Request
4750 	 * Retry Status completions.
4751 	 *
4752 	 * Therefore we wait for 100 ms and check for the device presence
4753 	 * until the timeout expires.
4754 	 */
4755 	if (!pcie_downstream_port(dev))
4756 		return 0;
4757 
4758 	if (pcie_get_speed_cap(dev) <= PCIE_SPEED_5_0GT) {
4759 		u16 status;
4760 
4761 		pci_dbg(dev, "waiting %d ms for downstream link\n", delay);
4762 		msleep(delay);
4763 
4764 		if (!pci_dev_wait(child, reset_type, PCI_RESET_WAIT - delay))
4765 			return 0;
4766 
4767 		/*
4768 		 * If the port supports active link reporting, we now check
4769 		 * whether the link is active and if not bail out early with
4770 		 * the assumption that the device is not present anymore.
4771 		 */
4772 		if (!dev->link_active_reporting)
4773 			return -ENOTTY;
4774 
4775 		pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &status);
4776 		if (!(status & PCI_EXP_LNKSTA_DLLLA))
4777 			return -ENOTTY;
4778 
4779 		return pci_dev_wait(child, reset_type,
4780 				    PCIE_RESET_READY_POLL_MS - PCI_RESET_WAIT);
4781 	}
4782 
4783 	pci_dbg(dev, "waiting %d ms for downstream link, after activation\n",
4784 		delay);
4785 	if (!pcie_wait_for_link_delay(dev, true, delay)) {
4786 		/* Did not train, no need to wait any further */
4787 		pci_info(dev, "Data Link Layer Link Active not set in %d msec\n", delay);
4788 		return -ENOTTY;
4789 	}
4790 
4791 	return pci_dev_wait(child, reset_type,
4792 			    PCIE_RESET_READY_POLL_MS - delay);
4793 }
4794 
4795 void pci_reset_secondary_bus(struct pci_dev *dev)
4796 {
4797 	u16 ctrl;
4798 
4799 	pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &ctrl);
4800 	ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
4801 	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
4802 
4803 	/*
4804 	 * PCI spec v3.0 7.6.4.2 requires minimum Trst of 1ms.  Double
4805 	 * this to 2ms to ensure that we meet the minimum requirement.
4806 	 */
4807 	msleep(2);
4808 
4809 	ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
4810 	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
4811 }
4812 
4813 void __weak pcibios_reset_secondary_bus(struct pci_dev *dev)
4814 {
4815 	pci_reset_secondary_bus(dev);
4816 }
4817 
4818 /**
4819  * pci_bridge_secondary_bus_reset - Reset the secondary bus on a PCI bridge.
4820  * @dev: Bridge device
4821  *
4822  * Use the bridge control register to assert reset on the secondary bus.
4823  * Devices on the secondary bus are left in power-on state.
4824  */
4825 int pci_bridge_secondary_bus_reset(struct pci_dev *dev)
4826 {
4827 	if (!dev->block_cfg_access)
4828 		pci_warn_once(dev, "unlocked secondary bus reset via: %pS\n",
4829 			      __builtin_return_address(0));
4830 	pcibios_reset_secondary_bus(dev);
4831 
4832 	return pci_bridge_wait_for_secondary_bus(dev, "bus reset");
4833 }
4834 EXPORT_SYMBOL_GPL(pci_bridge_secondary_bus_reset);
4835 
4836 static int pci_parent_bus_reset(struct pci_dev *dev, bool probe)
4837 {
4838 	struct pci_dev *pdev;
4839 
4840 	if (pci_is_root_bus(dev->bus) || dev->subordinate ||
4841 	    !dev->bus->self || dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
4842 		return -ENOTTY;
4843 
4844 	list_for_each_entry(pdev, &dev->bus->devices, bus_list)
4845 		if (pdev != dev)
4846 			return -ENOTTY;
4847 
4848 	if (probe)
4849 		return 0;
4850 
4851 	return pci_bridge_secondary_bus_reset(dev->bus->self);
4852 }
4853 
4854 static int pci_reset_hotplug_slot(struct hotplug_slot *hotplug, bool probe)
4855 {
4856 	int rc = -ENOTTY;
4857 
4858 	if (!hotplug || !try_module_get(hotplug->owner))
4859 		return rc;
4860 
4861 	if (hotplug->ops->reset_slot)
4862 		rc = hotplug->ops->reset_slot(hotplug, probe);
4863 
4864 	module_put(hotplug->owner);
4865 
4866 	return rc;
4867 }
4868 
4869 static int pci_dev_reset_slot_function(struct pci_dev *dev, bool probe)
4870 {
4871 	if (dev->multifunction || dev->subordinate || !dev->slot ||
4872 	    dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
4873 		return -ENOTTY;
4874 
4875 	return pci_reset_hotplug_slot(dev->slot->hotplug, probe);
4876 }
4877 
4878 static u16 cxl_port_dvsec(struct pci_dev *dev)
4879 {
4880 	return pci_find_dvsec_capability(dev, PCI_VENDOR_ID_CXL,
4881 					 PCI_DVSEC_CXL_PORT);
4882 }
4883 
4884 static bool cxl_sbr_masked(struct pci_dev *dev)
4885 {
4886 	u16 dvsec, reg;
4887 	int rc;
4888 
4889 	dvsec = cxl_port_dvsec(dev);
4890 	if (!dvsec)
4891 		return false;
4892 
4893 	rc = pci_read_config_word(dev, dvsec + PCI_DVSEC_CXL_PORT_CTL, &reg);
4894 	if (rc || PCI_POSSIBLE_ERROR(reg))
4895 		return false;
4896 
4897 	/*
4898 	 * Per CXL spec r3.1, sec 8.1.5.2, when "Unmask SBR" is 0, the SBR
4899 	 * bit in Bridge Control has no effect.  When 1, the Port generates
4900 	 * hot reset when the SBR bit is set to 1.
4901 	 */
4902 	if (reg & PCI_DVSEC_CXL_PORT_CTL_UNMASK_SBR)
4903 		return false;
4904 
4905 	return true;
4906 }
4907 
4908 static int pci_reset_bus_function(struct pci_dev *dev, bool probe)
4909 {
4910 	struct pci_dev *bridge = pci_upstream_bridge(dev);
4911 	int rc;
4912 
4913 	/*
4914 	 * If "dev" is below a CXL port that has SBR control masked, SBR
4915 	 * won't do anything, so return error.
4916 	 */
4917 	if (bridge && cxl_sbr_masked(bridge)) {
4918 		if (probe)
4919 			return 0;
4920 
4921 		return -ENOTTY;
4922 	}
4923 
4924 	rc = pci_dev_reset_iommu_prepare(dev);
4925 	if (rc) {
4926 		pci_err(dev, "failed to stop IOMMU for a PCI reset: %d\n", rc);
4927 		return rc;
4928 	}
4929 
4930 	rc = pci_dev_reset_slot_function(dev, probe);
4931 	if (rc != -ENOTTY)
4932 		goto done;
4933 
4934 	rc = pci_parent_bus_reset(dev, probe);
4935 done:
4936 	pci_dev_reset_iommu_done(dev);
4937 	return rc;
4938 }
4939 
4940 static int cxl_reset_bus_function(struct pci_dev *dev, bool probe)
4941 {
4942 	struct pci_dev *bridge;
4943 	u16 dvsec, reg, val;
4944 	int rc;
4945 
4946 	bridge = pci_upstream_bridge(dev);
4947 	if (!bridge)
4948 		return -ENOTTY;
4949 
4950 	dvsec = cxl_port_dvsec(bridge);
4951 	if (!dvsec)
4952 		return -ENOTTY;
4953 
4954 	if (probe)
4955 		return 0;
4956 
4957 	rc = pci_read_config_word(bridge, dvsec + PCI_DVSEC_CXL_PORT_CTL, &reg);
4958 	if (rc)
4959 		return -ENOTTY;
4960 
4961 	rc = pci_dev_reset_iommu_prepare(dev);
4962 	if (rc) {
4963 		pci_err(dev, "failed to stop IOMMU for a PCI reset: %d\n", rc);
4964 		return rc;
4965 	}
4966 
4967 	if (reg & PCI_DVSEC_CXL_PORT_CTL_UNMASK_SBR) {
4968 		val = reg;
4969 	} else {
4970 		val = reg | PCI_DVSEC_CXL_PORT_CTL_UNMASK_SBR;
4971 		pci_write_config_word(bridge, dvsec + PCI_DVSEC_CXL_PORT_CTL,
4972 				      val);
4973 	}
4974 
4975 	rc = pci_reset_bus_function(dev, probe);
4976 
4977 	if (reg != val)
4978 		pci_write_config_word(bridge, dvsec + PCI_DVSEC_CXL_PORT_CTL,
4979 				      reg);
4980 
4981 	pci_dev_reset_iommu_done(dev);
4982 	return rc;
4983 }
4984 
4985 void pci_dev_lock(struct pci_dev *dev)
4986 {
4987 	/* block PM suspend, driver probe, etc. */
4988 	device_lock(&dev->dev);
4989 	pci_cfg_access_lock(dev);
4990 }
4991 EXPORT_SYMBOL_GPL(pci_dev_lock);
4992 
4993 /* Return 1 on successful lock, 0 on contention */
4994 int pci_dev_trylock(struct pci_dev *dev)
4995 {
4996 	if (device_trylock(&dev->dev)) {
4997 		if (pci_cfg_access_trylock(dev))
4998 			return 1;
4999 		device_unlock(&dev->dev);
5000 	}
5001 
5002 	return 0;
5003 }
5004 EXPORT_SYMBOL_GPL(pci_dev_trylock);
5005 
5006 void pci_dev_unlock(struct pci_dev *dev)
5007 {
5008 	pci_cfg_access_unlock(dev);
5009 	device_unlock(&dev->dev);
5010 }
5011 EXPORT_SYMBOL_GPL(pci_dev_unlock);
5012 
5013 static void pci_dev_save_and_disable(struct pci_dev *dev)
5014 {
5015 	const struct pci_error_handlers *err_handler =
5016 			dev->driver ? dev->driver->err_handler : NULL;
5017 
5018 	/*
5019 	 * dev->driver->err_handler->reset_prepare() is protected against
5020 	 * races with ->remove() by the device lock, which must be held by
5021 	 * the caller.
5022 	 */
5023 	device_lock_assert(&dev->dev);
5024 	if (err_handler && err_handler->reset_prepare)
5025 		err_handler->reset_prepare(dev);
5026 	else if (dev->driver)
5027 		pci_warn(dev, "resetting");
5028 
5029 	/*
5030 	 * Wake-up device prior to save.  PM registers default to D0 after
5031 	 * reset and a simple register restore doesn't reliably return
5032 	 * to a non-D0 state anyway.
5033 	 */
5034 	pci_set_power_state(dev, PCI_D0);
5035 
5036 	pci_save_state(dev);
5037 	/*
5038 	 * Disable the device by clearing the Command register, except for
5039 	 * INTx-disable which is set.  This not only disables MMIO and I/O port
5040 	 * BARs, but also prevents the device from being Bus Master, preventing
5041 	 * DMA from the device including MSI/MSI-X interrupts.  For PCI 2.3
5042 	 * compliant devices, INTx-disable prevents legacy interrupts.
5043 	 */
5044 	pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
5045 }
5046 
5047 static void pci_dev_restore(struct pci_dev *dev)
5048 {
5049 	const struct pci_error_handlers *err_handler =
5050 			dev->driver ? dev->driver->err_handler : NULL;
5051 
5052 	pci_restore_state(dev);
5053 
5054 	/*
5055 	 * dev->driver->err_handler->reset_done() is protected against
5056 	 * races with ->remove() by the device lock, which must be held by
5057 	 * the caller.
5058 	 */
5059 	if (err_handler && err_handler->reset_done)
5060 		err_handler->reset_done(dev);
5061 	else if (dev->driver)
5062 		pci_warn(dev, "reset done");
5063 }
5064 
5065 /* dev->reset_methods[] is a 0-terminated list of indices into this array */
5066 const struct pci_reset_fn_method pci_reset_fn_methods[] = {
5067 	{ },
5068 	{ pci_dev_specific_reset, .name = "device_specific" },
5069 	{ pci_dev_acpi_reset, .name = "acpi" },
5070 	{ pcie_reset_flr, .name = "flr" },
5071 	{ pci_af_flr, .name = "af_flr" },
5072 	{ pci_pm_reset, .name = "pm" },
5073 	{ pci_reset_bus_function, .name = "bus" },
5074 	{ cxl_reset_bus_function, .name = "cxl_bus" },
5075 };
5076 
5077 /**
5078  * __pci_reset_function_locked - reset a PCI device function while holding
5079  * the @dev mutex lock.
5080  * @dev: PCI device to reset
5081  *
5082  * Some devices allow an individual function to be reset without affecting
5083  * other functions in the same device.  The PCI device must be responsive
5084  * to PCI config space in order to use this function.
5085  *
5086  * The device function is presumed to be unused and the caller is holding
5087  * the device mutex lock when this function is called.
5088  *
5089  * Resetting the device will make the contents of PCI configuration space
5090  * random, so any caller of this must be prepared to reinitialise the
5091  * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
5092  * etc.
5093  *
5094  * Context: The caller must hold the device lock.
5095  *
5096  * Return: 0 if the device function was successfully reset or negative if the
5097  * device doesn't support resetting a single function.
5098  */
__pci_reset_function_locked(struct pci_dev * dev)5099 int __pci_reset_function_locked(struct pci_dev *dev)
5100 {
5101 	int i, m, rc;
5102 	const struct pci_reset_fn_method *method;
5103 
5104 	might_sleep();
5105 	device_lock_assert(&dev->dev);
5106 
5107 	/*
5108 	 * A reset method returns -ENOTTY if it doesn't support this device and
5109 	 * we should try the next method.
5110 	 *
5111 	 * If it returns 0 (success), we're finished.  If it returns any other
5112 	 * error, we're also finished: this indicates that further reset
5113 	 * mechanisms might be broken on the device.
5114 	 */
5115 	for (i = 0; i < PCI_NUM_RESET_METHODS; i++) {
5116 		m = dev->reset_methods[i];
5117 		if (!m)
5118 			return -ENOTTY;
5119 
5120 		method = &pci_reset_fn_methods[m];
5121 		pci_dbg(dev, "reset via %s\n", method->name);
5122 		rc = method->reset_fn(dev, PCI_RESET_DO_RESET);
5123 		if (!rc)
5124 			return 0;
5125 
5126 		pci_dbg(dev, "%s failed with %d\n", method->name, rc);
5127 		if (rc != -ENOTTY)
5128 			return rc;
5129 	}
5130 
5131 	return -ENOTTY;
5132 }
5133 EXPORT_SYMBOL_GPL(__pci_reset_function_locked);
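
/*
 * Illustrative sketch (not part of the original file): a caller already
 * holding the device lock typically saves state first and restores it
 * afterwards, since the reset leaves config space in an undefined state:
 *
 *	device_lock(&pdev->dev);
 *	pci_save_state(pdev);
 *	rc = __pci_reset_function_locked(pdev);
 *	pci_restore_state(pdev);
 *	device_unlock(&pdev->dev);
 */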
5134 
5135 /**
5136  * pci_init_reset_methods - check whether device can be safely reset
5137  * and store supported reset mechanisms.
5138  * @dev: PCI device to check for reset mechanisms
5139  *
5140  * Some devices allow an individual function to be reset without affecting
5141  * other functions in the same device.  The PCI device must be in D0-D3hot
5142  * state.
5143  *
5144  * Stores reset mechanisms supported by device in reset_methods byte array
5145  * which is a member of struct pci_dev.
5146  */
pci_init_reset_methods(struct pci_dev * dev)5147 void pci_init_reset_methods(struct pci_dev *dev)
5148 {
5149 	int m, i, rc;
5150 
5151 	BUILD_BUG_ON(ARRAY_SIZE(pci_reset_fn_methods) != PCI_NUM_RESET_METHODS);
5152 
5153 	might_sleep();
5154 
5155 	i = 0;
5156 	for (m = 1; m < PCI_NUM_RESET_METHODS; m++) {
5157 		rc = pci_reset_fn_methods[m].reset_fn(dev, PCI_RESET_PROBE);
5158 		if (!rc)
5159 			dev->reset_methods[i++] = m;
5160 		else if (rc != -ENOTTY)
5161 			break;
5162 	}
5163 
5164 	dev->reset_methods[i] = 0;
5165 }
5166 
5167 /**
5168  * pci_reset_function - quiesce and reset a PCI device function
5169  * @dev: PCI device to reset
5170  *
5171  * Some devices allow an individual function to be reset without affecting
5172  * other functions in the same device.  The PCI device must be responsive
5173  * to PCI config space in order to use this function.
5174  *
5175  * This function does not just reset the PCI portion of a device, but
5176  * clears all the state associated with the device.  This function differs
5177  * from __pci_reset_function_locked() in that it saves and restores device state
5178  * over the reset and takes the PCI device lock.
5179  *
5180  * Returns 0 if the device function was successfully reset or negative if the
5181  * device doesn't support resetting a single function.
5182  */
pci_reset_function(struct pci_dev * dev)5183 int pci_reset_function(struct pci_dev *dev)
5184 {
5185 	struct pci_dev *bridge;
5186 	int rc;
5187 
5188 	if (!pci_reset_supported(dev))
5189 		return -ENOTTY;
5190 
5191 	/*
5192 	 * If there's no upstream bridge, no locking is needed since there is
5193 	 * no upstream bridge configuration to hold consistent.
5194 	 */
5195 	bridge = pci_upstream_bridge(dev);
5196 	if (bridge)
5197 		pci_dev_lock(bridge);
5198 
5199 	pci_dev_lock(dev);
5200 	pci_dev_save_and_disable(dev);
5201 
5202 	rc = __pci_reset_function_locked(dev);
5203 
5204 	pci_dev_restore(dev);
5205 	pci_dev_unlock(dev);
5206 
5207 	if (bridge)
5208 		pci_dev_unlock(bridge);
5209 
5210 	return rc;
5211 }
5212 EXPORT_SYMBOL_GPL(pci_reset_function);
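
/*
 * Illustrative sketch (not part of the original file): unlike the
 * locked variants, pci_reset_function() handles locking and the state
 * save/restore itself, so a driver-initiated reset is a single call:
 *
 *	if (pci_reset_supported(pdev)) {
 *		rc = pci_reset_function(pdev);
 *		if (rc)
 *			pci_err(pdev, "reset failed: %d\n", rc);
 *	}
 */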
5213 
5214 /**
5215  * pci_reset_function_locked - quiesce and reset a PCI device function
5216  * @dev: PCI device to reset
5217  *
5218  * Some devices allow an individual function to be reset without affecting
5219  * other functions in the same device.  The PCI device must be responsive
5220  * to PCI config space in order to use this function.
5221  *
5222  * This function does not just reset the PCI portion of a device, but
5223  * clears all the state associated with the device.  This function differs
5224  * from __pci_reset_function_locked() in that it saves and restores device state
5225  * over the reset.  It also differs from pci_reset_function() in that it
5226  * requires the PCI device lock to be held.
5227  *
5228  * Context: The caller must hold the device lock.
5229  *
5230  * Return: 0 if the device function was successfully reset or negative if the
5231  * device doesn't support resetting a single function.
5232  */
pci_reset_function_locked(struct pci_dev * dev)5233 int pci_reset_function_locked(struct pci_dev *dev)
5234 {
5235 	int rc;
5236 
5237 	device_lock_assert(&dev->dev);
5238 
5239 	if (!pci_reset_supported(dev))
5240 		return -ENOTTY;
5241 
5242 	pci_dev_save_and_disable(dev);
5243 
5244 	rc = __pci_reset_function_locked(dev);
5245 
5246 	pci_dev_restore(dev);
5247 
5248 	return rc;
5249 }
5250 EXPORT_SYMBOL_GPL(pci_reset_function_locked);
5251 
5252 /**
5253  * pci_try_reset_function - quiesce and reset a PCI device function
5254  * @dev: PCI device to reset
5255  *
5256  * Same as above, except return -EAGAIN if unable to lock device.
5257  */
pci_try_reset_function(struct pci_dev * dev)5258 int pci_try_reset_function(struct pci_dev *dev)
5259 {
5260 	int rc;
5261 
5262 	if (!pci_reset_supported(dev))
5263 		return -ENOTTY;
5264 
5265 	if (!pci_dev_trylock(dev))
5266 		return -EAGAIN;
5267 
5268 	pci_dev_save_and_disable(dev);
5269 	rc = __pci_reset_function_locked(dev);
5270 	pci_dev_restore(dev);
5271 	pci_dev_unlock(dev);
5272 
5273 	return rc;
5274 }
5275 EXPORT_SYMBOL_GPL(pci_try_reset_function);
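
/*
 * Illustrative sketch (not part of the original file): -EAGAIN only
 * indicates lock contention, so callers may back off and retry (the
 * retry bound and sleep below are arbitrary):
 *
 *	do {
 *		rc = pci_try_reset_function(pdev);
 *		if (rc != -EAGAIN)
 *			break;
 *		msleep(20);
 *	} while (--retries);
 */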
5276 
5277 /* Do any devices on or below this bus prevent a bus reset? */
pci_bus_resettable(struct pci_bus * bus)5278 static bool pci_bus_resettable(struct pci_bus *bus)
5279 {
5280 	struct pci_dev *dev;
5281 
5283 	if (bus->self && (bus->self->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET))
5284 		return false;
5285 
5286 	list_for_each_entry(dev, &bus->devices, bus_list) {
5287 		if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
5288 		    (dev->subordinate && !pci_bus_resettable(dev->subordinate)))
5289 			return false;
5290 	}
5291 
5292 	return true;
5293 }
5294 
5295 /* Lock devices from the top of the tree down */
pci_bus_lock(struct pci_bus * bus)5296 static void pci_bus_lock(struct pci_bus *bus)
5297 {
5298 	struct pci_dev *dev;
5299 
5300 	pci_dev_lock(bus->self);
5301 	list_for_each_entry(dev, &bus->devices, bus_list) {
5302 		if (dev->subordinate)
5303 			pci_bus_lock(dev->subordinate);
5304 		else
5305 			pci_dev_lock(dev);
5306 	}
5307 }
5308 
5309 /* Unlock devices from the bottom of the tree up */
pci_bus_unlock(struct pci_bus * bus)5310 static void pci_bus_unlock(struct pci_bus *bus)
5311 {
5312 	struct pci_dev *dev;
5313 
5314 	list_for_each_entry(dev, &bus->devices, bus_list) {
5315 		if (dev->subordinate)
5316 			pci_bus_unlock(dev->subordinate);
5317 		else
5318 			pci_dev_unlock(dev);
5319 	}
5320 	pci_dev_unlock(bus->self);
5321 }
5322 
5323 /* Return 1 on successful lock, 0 on contention */
pci_bus_trylock(struct pci_bus * bus)5324 static int pci_bus_trylock(struct pci_bus *bus)
5325 {
5326 	struct pci_dev *dev;
5327 
5328 	if (!pci_dev_trylock(bus->self))
5329 		return 0;
5330 
5331 	list_for_each_entry(dev, &bus->devices, bus_list) {
5332 		if (dev->subordinate) {
5333 			if (!pci_bus_trylock(dev->subordinate))
5334 				goto unlock;
5335 		} else if (!pci_dev_trylock(dev))
5336 			goto unlock;
5337 	}
5338 	return 1;
5339 
5340 unlock:
5341 	list_for_each_entry_continue_reverse(dev, &bus->devices, bus_list) {
5342 		if (dev->subordinate)
5343 			pci_bus_unlock(dev->subordinate);
5344 		else
5345 			pci_dev_unlock(dev);
5346 	}
5347 	pci_dev_unlock(bus->self);
5348 	return 0;
5349 }
5350 
5351 /* Do any devices on or below this slot prevent a bus reset? */
pci_slot_resettable(struct pci_slot * slot)5352 static bool pci_slot_resettable(struct pci_slot *slot)
5353 {
5354 	struct pci_dev *dev, *bridge = slot->bus->self;
5355 
5356 	if (bridge && (bridge->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET))
5357 		return false;
5358 
5359 	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5360 		if (!dev->slot || dev->slot != slot)
5361 			continue;
5362 		if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
5363 		    (dev->subordinate && !pci_bus_resettable(dev->subordinate)))
5364 			return false;
5365 	}
5366 
5367 	return true;
5368 }
5369 
5370 /* Lock devices from the top of the tree down */
pci_slot_lock(struct pci_slot * slot)5371 static void pci_slot_lock(struct pci_slot *slot)
5372 {
5373 	struct pci_dev *dev, *bridge = slot->bus->self;
5374 
5375 	if (bridge)
5376 		pci_dev_lock(bridge);
5377 
5378 	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5379 		if (!dev->slot || dev->slot != slot)
5380 			continue;
5381 		if (dev->subordinate)
5382 			pci_bus_lock(dev->subordinate);
5383 		else
5384 			pci_dev_lock(dev);
5385 	}
5386 }
5387 
5388 /* Unlock devices from the bottom of the tree up */
pci_slot_unlock(struct pci_slot * slot)5389 static void pci_slot_unlock(struct pci_slot *slot)
5390 {
5391 	struct pci_dev *dev, *bridge = slot->bus->self;
5392 
5393 	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5394 		if (!dev->slot || dev->slot != slot)
5395 			continue;
5396 		if (dev->subordinate)
5397 			pci_bus_unlock(dev->subordinate);
5398 		else
5399 			pci_dev_unlock(dev);
5400 	}
5401 
5402 	if (bridge)
5403 		pci_dev_unlock(bridge);
5404 }
5405 
5406 /* Return 1 on successful lock, 0 on contention */
pci_slot_trylock(struct pci_slot * slot)5407 static int pci_slot_trylock(struct pci_slot *slot)
5408 {
5409 	struct pci_dev *dev, *bridge = slot->bus->self;
5410 
5411 	if (bridge && !pci_dev_trylock(bridge))
5412 		return 0;
5413 
5414 	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5415 		if (!dev->slot || dev->slot != slot)
5416 			continue;
5417 		if (dev->subordinate) {
5418 			if (!pci_bus_trylock(dev->subordinate))
5419 				goto unlock;
5420 		} else if (!pci_dev_trylock(dev))
5421 			goto unlock;
5422 	}
5423 	return 1;
5424 
5425 unlock:
5426 	list_for_each_entry_continue_reverse(dev,
5427 					     &slot->bus->devices, bus_list) {
5428 		if (!dev->slot || dev->slot != slot)
5429 			continue;
5430 		if (dev->subordinate)
5431 			pci_bus_unlock(dev->subordinate);
5432 		else
5433 			pci_dev_unlock(dev);
5434 	}
5435 
5436 	if (bridge)
5437 		pci_dev_unlock(bridge);
5438 	return 0;
5439 }
5440 
5441 /*
5442  * Save and disable devices from the top of the tree down while holding
5443  * the @dev mutex lock for the entire tree.
5444  */
pci_bus_save_and_disable_locked(struct pci_bus * bus)5445 static void pci_bus_save_and_disable_locked(struct pci_bus *bus)
5446 {
5447 	struct pci_dev *dev;
5448 
5449 	list_for_each_entry(dev, &bus->devices, bus_list) {
5450 		pci_dev_save_and_disable(dev);
5451 		if (dev->subordinate)
5452 			pci_bus_save_and_disable_locked(dev->subordinate);
5453 	}
5454 }
5455 
5456 /*
5457  * Restore devices from top of the tree down while holding @dev mutex lock
5458  * for the entire tree.  Parent bridges need to be restored before we can
5459  * get to subordinate devices.
5460  */
pci_bus_restore_locked(struct pci_bus * bus)5461 static void pci_bus_restore_locked(struct pci_bus *bus)
5462 {
5463 	struct pci_dev *dev;
5464 
5465 	list_for_each_entry(dev, &bus->devices, bus_list) {
5466 		pci_dev_restore(dev);
5467 		if (dev->subordinate) {
5468 			pci_bridge_wait_for_secondary_bus(dev, "bus reset");
5469 			pci_bus_restore_locked(dev->subordinate);
5470 		}
5471 	}
5472 }
5473 
5474 /*
5475  * Save and disable devices from the top of the tree down while holding
5476  * the @dev mutex lock for the entire tree.
5477  */
pci_slot_save_and_disable_locked(struct pci_slot * slot)5478 static void pci_slot_save_and_disable_locked(struct pci_slot *slot)
5479 {
5480 	struct pci_dev *dev;
5481 
5482 	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5483 		if (!dev->slot || dev->slot != slot)
5484 			continue;
5485 		pci_dev_save_and_disable(dev);
5486 		if (dev->subordinate)
5487 			pci_bus_save_and_disable_locked(dev->subordinate);
5488 	}
5489 }
5490 
5491 /*
5492  * Restore devices from top of the tree down while holding @dev mutex lock
5493  * for the entire tree.  Parent bridges need to be restored before we can
5494  * get to subordinate devices.
5495  */
pci_slot_restore_locked(struct pci_slot * slot)5496 static void pci_slot_restore_locked(struct pci_slot *slot)
5497 {
5498 	struct pci_dev *dev;
5499 
5500 	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5501 		if (!dev->slot || dev->slot != slot)
5502 			continue;
5503 		pci_dev_restore(dev);
5504 		if (dev->subordinate) {
5505 			pci_bridge_wait_for_secondary_bus(dev, "slot reset");
5506 			pci_bus_restore_locked(dev->subordinate);
5507 		}
5508 	}
5509 }
5510 
pci_slot_reset(struct pci_slot * slot,bool probe)5511 static int pci_slot_reset(struct pci_slot *slot, bool probe)
5512 {
5513 	int rc;
5514 
5515 	if (!slot || !pci_slot_resettable(slot))
5516 		return -ENOTTY;
5517 
5518 	if (!probe)
5519 		pci_slot_lock(slot);
5520 
5521 	might_sleep();
5522 
5523 	rc = pci_reset_hotplug_slot(slot->hotplug, probe);
5524 
5525 	if (!probe)
5526 		pci_slot_unlock(slot);
5527 
5528 	return rc;
5529 }
5530 
5531 /**
5532  * pci_probe_reset_slot - probe whether a PCI slot can be reset
5533  * @slot: PCI slot to probe
5534  *
5535  * Return 0 if slot can be reset, negative if a slot reset is not supported.
5536  */
pci_probe_reset_slot(struct pci_slot * slot)5537 int pci_probe_reset_slot(struct pci_slot *slot)
5538 {
5539 	return pci_slot_reset(slot, PCI_RESET_PROBE);
5540 }
5541 EXPORT_SYMBOL_GPL(pci_probe_reset_slot);
5542 
5543 /**
5544  * __pci_reset_slot - Try to reset a PCI slot
5545  * @slot: PCI slot to reset
5546  *
5547  * A PCI bus may host multiple slots, each slot may support a reset mechanism
5548  * independent of other slots.  For instance, some slots may support slot power
5549  * control.  In the case of a 1:1 bus to slot architecture, this function may
5550  * wrap the bus reset to avoid spurious slot related events such as hotplug.
5551  * Generally a slot reset should be attempted before a bus reset.  All of the
5552  * function of the slot and any subordinate buses behind the slot are reset
5553  * through this function.  PCI config space of all devices in the slot and
5554  * behind the slot is saved before and restored after reset.
5555  *
5556  * Same as above except return -EAGAIN if the slot cannot be locked
5557  */
__pci_reset_slot(struct pci_slot * slot)5558 static int __pci_reset_slot(struct pci_slot *slot)
5559 {
5560 	int rc;
5561 
5562 	rc = pci_slot_reset(slot, PCI_RESET_PROBE);
5563 	if (rc)
5564 		return rc;
5565 
5566 	if (pci_slot_trylock(slot)) {
5567 		pci_slot_save_and_disable_locked(slot);
5568 		might_sleep();
5569 		rc = pci_reset_hotplug_slot(slot->hotplug, PCI_RESET_DO_RESET);
5570 		pci_slot_restore_locked(slot);
5571 		pci_slot_unlock(slot);
5572 	} else
5573 		rc = -EAGAIN;
5574 
5575 	return rc;
5576 }
5577 
pci_bus_reset(struct pci_bus * bus,bool probe)5578 static int pci_bus_reset(struct pci_bus *bus, bool probe)
5579 {
5580 	int ret;
5581 
5582 	if (!bus->self || !pci_bus_resettable(bus))
5583 		return -ENOTTY;
5584 
5585 	if (probe)
5586 		return 0;
5587 
5588 	pci_bus_lock(bus);
5589 
5590 	might_sleep();
5591 
5592 	ret = pci_bridge_secondary_bus_reset(bus->self);
5593 
5594 	pci_bus_unlock(bus);
5595 
5596 	return ret;
5597 }
5598 
5599 /**
5600  * pci_bus_error_reset - reset the bridge's subordinate bus
5601  * @bridge: The parent device that connects to the bus to reset
5602  *
5603  * This function will first try to reset the slots on this bus if the method is
5604  * available. If slot reset fails or is not available, this will fall back to a
5605  * secondary bus reset.
5606  */
pci_bus_error_reset(struct pci_dev * bridge)5607 int pci_bus_error_reset(struct pci_dev *bridge)
5608 {
5609 	struct pci_bus *bus = bridge->subordinate;
5610 	struct pci_slot *slot;
5611 
5612 	if (!bus)
5613 		return -ENOTTY;
5614 
5615 	mutex_lock(&pci_slot_mutex);
5616 	if (list_empty(&bus->slots))
5617 		goto bus_reset;
5618 
5619 	list_for_each_entry(slot, &bus->slots, list)
5620 		if (pci_probe_reset_slot(slot))
5621 			goto bus_reset;
5622 
5623 	list_for_each_entry(slot, &bus->slots, list)
5624 		if (pci_slot_reset(slot, PCI_RESET_DO_RESET))
5625 			goto bus_reset;
5626 
5627 	mutex_unlock(&pci_slot_mutex);
5628 	return 0;
5629 bus_reset:
5630 	mutex_unlock(&pci_slot_mutex);
5631 	return pci_bus_reset(bridge->subordinate, PCI_RESET_DO_RESET);
5632 }
5633 
5634 /**
5635  * pci_probe_reset_bus - probe whether a PCI bus can be reset
5636  * @bus: PCI bus to probe
5637  *
5638  * Return 0 if bus can be reset, negative if a bus reset is not supported.
5639  */
pci_probe_reset_bus(struct pci_bus * bus)5640 int pci_probe_reset_bus(struct pci_bus *bus)
5641 {
5642 	return pci_bus_reset(bus, PCI_RESET_PROBE);
5643 }
5644 EXPORT_SYMBOL_GPL(pci_probe_reset_bus);
5645 
5646 /**
5647  * __pci_reset_bus - Try to reset a PCI bus
5648  * @bus: top level PCI bus to reset
5649  *
5650  * Same as above except return -EAGAIN if the bus cannot be locked
5651  */
__pci_reset_bus(struct pci_bus * bus)5652 int __pci_reset_bus(struct pci_bus *bus)
5653 {
5654 	int rc;
5655 
5656 	rc = pci_bus_reset(bus, PCI_RESET_PROBE);
5657 	if (rc)
5658 		return rc;
5659 
5660 	if (pci_bus_trylock(bus)) {
5661 		pci_bus_save_and_disable_locked(bus);
5662 		might_sleep();
5663 		rc = pci_bridge_secondary_bus_reset(bus->self);
5664 		pci_bus_restore_locked(bus);
5665 		pci_bus_unlock(bus);
5666 	} else
5667 		rc = -EAGAIN;
5668 
5669 	return rc;
5670 }
5671 
5672 /**
5673  * pci_reset_bus - Try to reset a PCI bus
5674  * @pdev: top level PCI device to reset via slot/bus
5675  *
5676  * Same as above except return -EAGAIN if the bus cannot be locked
5677  */
pci_reset_bus(struct pci_dev * pdev)5678 int pci_reset_bus(struct pci_dev *pdev)
5679 {
5680 	return (!pci_probe_reset_slot(pdev->slot)) ?
5681 	    __pci_reset_slot(pdev->slot) : __pci_reset_bus(pdev->bus);
5682 }
5683 EXPORT_SYMBOL_GPL(pci_reset_bus);
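
/*
 * Illustrative sketch (not part of the original file): callers such as
 * device assignment frameworks only need the affected device; slot vs.
 * bus reset selection and the tree locking happen internally:
 *
 *	rc = pci_reset_bus(pdev);
 *	if (rc == -EAGAIN)
 *		... the tree could not be locked, retry later ...
 */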
5684 
5685 /**
5686  * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count
5687  * @dev: PCI device to query
5688  *
5689  * Returns mmrbc: maximum designed memory read count in bytes or
5690  * appropriate error value.
5691  */
pcix_get_max_mmrbc(struct pci_dev * dev)5692 int pcix_get_max_mmrbc(struct pci_dev *dev)
5693 {
5694 	int cap;
5695 	u32 stat;
5696 
5697 	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
5698 	if (!cap)
5699 		return -EINVAL;
5700 
5701 	if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
5702 		return -EINVAL;
5703 
5704 	return 512 << FIELD_GET(PCI_X_STATUS_MAX_READ, stat);
5705 }
5706 EXPORT_SYMBOL(pcix_get_max_mmrbc);
5707 
5708 /**
5709  * pcix_get_mmrbc - get PCI-X maximum memory read byte count
5710  * @dev: PCI device to query
5711  *
5712  * Returns mmrbc: maximum memory read count in bytes or appropriate error
5713  * value.
5714  */
pcix_get_mmrbc(struct pci_dev * dev)5715 int pcix_get_mmrbc(struct pci_dev *dev)
5716 {
5717 	int cap;
5718 	u16 cmd;
5719 
5720 	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
5721 	if (!cap)
5722 		return -EINVAL;
5723 
5724 	if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
5725 		return -EINVAL;
5726 
5727 	return 512 << FIELD_GET(PCI_X_CMD_MAX_READ, cmd);
5728 }
5729 EXPORT_SYMBOL(pcix_get_mmrbc);
5730 
5731 /**
5732  * pcix_set_mmrbc - set PCI-X maximum memory read byte count
5733  * @dev: PCI device to query
5734  * @mmrbc: maximum memory read count in bytes
5735  *    valid values are 512, 1024, 2048, 4096
5736  *
5737  * If possible, sets the maximum memory read byte count; some bridges have
5738  * errata that prevent this.
5739  */
pcix_set_mmrbc(struct pci_dev * dev,int mmrbc)5740 int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
5741 {
5742 	int cap;
5743 	u32 stat, v, o;
5744 	u16 cmd;
5745 
5746 	if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc))
5747 		return -EINVAL;
5748 
5749 	v = ffs(mmrbc) - 10;
5750 
5751 	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
5752 	if (!cap)
5753 		return -EINVAL;
5754 
5755 	if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
5756 		return -EINVAL;
5757 
5758 	if (v > FIELD_GET(PCI_X_STATUS_MAX_READ, stat))
5759 		return -E2BIG;
5760 
5761 	if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
5762 		return -EINVAL;
5763 
5764 	o = FIELD_GET(PCI_X_CMD_MAX_READ, cmd);
5765 	if (o != v) {
5766 		if (v > o && (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC))
5767 			return -EIO;
5768 
5769 		cmd &= ~PCI_X_CMD_MAX_READ;
5770 		cmd |= FIELD_PREP(PCI_X_CMD_MAX_READ, v);
5771 		if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd))
5772 			return -EIO;
5773 	}
5774 	return 0;
5775 }
5776 EXPORT_SYMBOL(pcix_set_mmrbc);
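
/*
 * Illustrative sketch (not part of the original file): @mmrbc must be a
 * power of two between 512 and 4096, and no larger than the designed
 * maximum reported by pcix_get_max_mmrbc():
 *
 *	if (pcix_get_max_mmrbc(pdev) >= 2048)
 *		rc = pcix_set_mmrbc(pdev, 2048);
 */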
5777 
5778 /**
5779  * pcie_get_readrq - get PCI Express read request size
5780  * @dev: PCI device to query
5781  *
5782  * Returns maximum memory read request in bytes or appropriate error value.
5783  */
pcie_get_readrq(struct pci_dev * dev)5784 int pcie_get_readrq(struct pci_dev *dev)
5785 {
5786 	u16 ctl;
5787 
5788 	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
5789 
5790 	return 128 << FIELD_GET(PCI_EXP_DEVCTL_READRQ, ctl);
5791 }
5792 EXPORT_SYMBOL(pcie_get_readrq);
5793 
5794 /**
5795  * pcie_set_readrq - set PCI Express maximum memory read request
5796  * @dev: PCI device to query
5797  * @rq: maximum memory read count in bytes
5798  *    valid values are 128, 256, 512, 1024, 2048, 4096
5799  *
5800  * If possible, sets the maximum memory read request in bytes.
5801  */
pcie_set_readrq(struct pci_dev * dev,int rq)5802 int pcie_set_readrq(struct pci_dev *dev, int rq)
5803 {
5804 	u16 v;
5805 	int ret;
5806 	unsigned int firstbit;
5807 	struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);
5808 
5809 	if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
5810 		return -EINVAL;
5811 
5812 	/*
5813 	 * If using the "performance" PCIe config, we clamp the read rq
5814 	 * size to the max packet size to keep the host bridge from
5815 	 * generating requests larger than we can cope with.
5816 	 */
5817 	if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
5818 		int mps = pcie_get_mps(dev);
5819 
5820 		if (mps < rq)
5821 			rq = mps;
5822 	}
5823 
5824 	firstbit = ffs(rq);
5825 	if (firstbit < 8)
5826 		return -EINVAL;
5827 	v = FIELD_PREP(PCI_EXP_DEVCTL_READRQ, firstbit - 8);
5828 
5829 	if (bridge->no_inc_mrrs) {
5830 		int max_mrrs = pcie_get_readrq(dev);
5831 
5832 		if (rq > max_mrrs) {
5833 			pci_info(dev, "can't set Max_Read_Request_Size to %d; max is %d\n", rq, max_mrrs);
5834 			return -EINVAL;
5835 		}
5836 	}
5837 
5838 	ret = pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
5839 						  PCI_EXP_DEVCTL_READRQ, v);
5840 
5841 	return pcibios_err_to_errno(ret);
5842 }
5843 EXPORT_SYMBOL(pcie_set_readrq);
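
/*
 * Illustrative sketch (not part of the original file): a driver that
 * wants to cap its Max_Read_Request_Size, e.g. to keep a fabric fair,
 * passes a power of two between 128 and 4096:
 *
 *	rc = pcie_set_readrq(pdev, 256);
 *	if (!rc)
 *		pci_dbg(pdev, "MRRS is now %d\n", pcie_get_readrq(pdev));
 */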
5844 
5845 /**
5846  * pcie_get_mps - get PCI Express maximum payload size
5847  * @dev: PCI device to query
5848  *
5849  * Returns maximum payload size in bytes
5850  */
pcie_get_mps(struct pci_dev * dev)5851 int pcie_get_mps(struct pci_dev *dev)
5852 {
5853 	u16 ctl;
5854 
5855 	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
5856 
5857 	return 128 << FIELD_GET(PCI_EXP_DEVCTL_PAYLOAD, ctl);
5858 }
5859 EXPORT_SYMBOL(pcie_get_mps);
5860 
5861 /**
5862  * pcie_set_mps - set PCI Express maximum payload size
5863  * @dev: PCI device to query
5864  * @mps: maximum payload size in bytes
5865  *    valid values are 128, 256, 512, 1024, 2048, 4096
5866  *
5867  * If possible, sets the maximum payload size.
5868  */
pcie_set_mps(struct pci_dev * dev,int mps)5869 int pcie_set_mps(struct pci_dev *dev, int mps)
5870 {
5871 	u16 v;
5872 	int ret;
5873 
5874 	if (mps < 128 || mps > 4096 || !is_power_of_2(mps))
5875 		return -EINVAL;
5876 
5877 	v = ffs(mps) - 8;
5878 	if (v > dev->pcie_mpss)
5879 		return -EINVAL;
5880 	v = FIELD_PREP(PCI_EXP_DEVCTL_PAYLOAD, v);
5881 
5882 	ret = pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
5883 						  PCI_EXP_DEVCTL_PAYLOAD, v);
5884 
5885 	return pcibios_err_to_errno(ret);
5886 }
5887 EXPORT_SYMBOL(pcie_set_mps);
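
/*
 * Illustrative sketch (not part of the original file): MPS, unlike
 * MRRS, must stay consistent across the fabric, so drivers rarely
 * change it themselves; the value is bounded by dev->pcie_mpss:
 *
 *	if (128 << pdev->pcie_mpss >= 256)
 *		rc = pcie_set_mps(pdev, 256);
 */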
5888 
to_pcie_link_speed(u16 lnksta)5889 static enum pci_bus_speed to_pcie_link_speed(u16 lnksta)
5890 {
5891 	return pcie_link_speed[FIELD_GET(PCI_EXP_LNKSTA_CLS, lnksta)];
5892 }
5893 
pcie_link_speed_mbps(struct pci_dev * pdev)5894 int pcie_link_speed_mbps(struct pci_dev *pdev)
5895 {
5896 	u16 lnksta;
5897 	int err;
5898 
5899 	err = pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnksta);
5900 	if (err)
5901 		return err;
5902 
5903 	return pcie_dev_speed_mbps(to_pcie_link_speed(lnksta));
5904 }
5905 EXPORT_SYMBOL(pcie_link_speed_mbps);
5906 
5907 /**
5908  * pcie_bandwidth_available - determine minimum link settings of a PCIe
5909  *			      device and its bandwidth limitation
5910  * @dev: PCI device to query
5911  * @limiting_dev: storage for device causing the bandwidth limitation
5912  * @speed: storage for speed of limiting device
5913  * @width: storage for width of limiting device
5914  *
5915  * Walk up the PCI device chain and find the point where the minimum
5916  * bandwidth is available.  Return the bandwidth available there and (if
5917  * limiting_dev, speed, and width pointers are supplied) information about
5918  * that point.  The bandwidth returned is in Mb/s, i.e., megabits/second of
5919  * raw bandwidth.
5920  */
pcie_bandwidth_available(struct pci_dev * dev,struct pci_dev ** limiting_dev,enum pci_bus_speed * speed,enum pcie_link_width * width)5921 u32 pcie_bandwidth_available(struct pci_dev *dev, struct pci_dev **limiting_dev,
5922 			     enum pci_bus_speed *speed,
5923 			     enum pcie_link_width *width)
5924 {
5925 	u16 lnksta;
5926 	enum pci_bus_speed next_speed;
5927 	enum pcie_link_width next_width;
5928 	u32 bw, next_bw;
5929 
5930 	if (speed)
5931 		*speed = PCI_SPEED_UNKNOWN;
5932 	if (width)
5933 		*width = PCIE_LNK_WIDTH_UNKNOWN;
5934 
5935 	bw = 0;
5936 
5937 	while (dev) {
5938 		pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
5939 
5940 		next_speed = to_pcie_link_speed(lnksta);
5941 		next_width = FIELD_GET(PCI_EXP_LNKSTA_NLW, lnksta);
5942 
5943 		next_bw = next_width * PCIE_SPEED2MBS_ENC(next_speed);
5944 
5945 		/* Check if current device limits the total bandwidth */
5946 		if (!bw || next_bw <= bw) {
5947 			bw = next_bw;
5948 
5949 			if (limiting_dev)
5950 				*limiting_dev = dev;
5951 			if (speed)
5952 				*speed = next_speed;
5953 			if (width)
5954 				*width = next_width;
5955 		}
5956 
5957 		dev = pci_upstream_bridge(dev);
5958 	}
5959 
5960 	return bw;
5961 }
5962 EXPORT_SYMBOL(pcie_bandwidth_available);
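
/*
 * Illustrative sketch (not part of the original file): all three
 * out-parameters are optional, so a caller interested only in the
 * bottleneck device can pass NULL for the rest:
 *
 *	struct pci_dev *limit = NULL;
 *	u32 bw;
 *
 *	bw = pcie_bandwidth_available(pdev, &limit, NULL, NULL);
 */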
5963 
5964 /**
5965  * pcie_get_supported_speeds - query Supported Link Speed Vector
5966  * @dev: PCI device to query
5967  *
5968  * Query @dev supported link speeds.
5969  *
5970  * Implementation Note in PCIe r6.0 sec 7.5.3.18 recommends determining
5971  * supported link speeds using the Supported Link Speeds Vector in the Link
5972  * Capabilities 2 Register (when available).
5973  *
5974  * Link Capabilities 2 was added in PCIe r3.0, sec 7.8.18.
5975  *
5976  * Without Link Capabilities 2, i.e., prior to PCIe r3.0, Supported Link
5977  * Speeds field in Link Capabilities is used and only 2.5 GT/s and 5.0 GT/s
5978  * speeds were defined.
5979  *
5980  * For @dev without Supported Link Speed Vector, the field is synthesized
5981  * from the Max Link Speed field in the Link Capabilities Register.
5982  *
5983  * Return: Supported Link Speeds Vector (+ reserved 0 at LSB).
5984  */
pcie_get_supported_speeds(struct pci_dev * dev)5985 u8 pcie_get_supported_speeds(struct pci_dev *dev)
5986 {
5987 	u32 lnkcap2, lnkcap;
5988 	u8 speeds;
5989 
5990 	/*
5991 	 * Speeds retain the reserved 0 at LSB before PCIe Supported Link
5992 	 * Speeds Vector to allow using SLS Vector bit defines directly.
5993 	 */
5994 	pcie_capability_read_dword(dev, PCI_EXP_LNKCAP2, &lnkcap2);
5995 	speeds = lnkcap2 & PCI_EXP_LNKCAP2_SLS;
5996 
5997 	/* Ignore speeds higher than Max Link Speed */
5998 	pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
5999 	speeds &= GENMASK(lnkcap & PCI_EXP_LNKCAP_SLS, 0);
6000 
6001 	/* PCIe r3.0-compliant */
6002 	if (speeds)
6003 		return speeds;
6004 
6005 	/* Synthesize from the Max Link Speed field */
6006 	if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_5_0GB)
6007 		speeds = PCI_EXP_LNKCAP2_SLS_5_0GB | PCI_EXP_LNKCAP2_SLS_2_5GB;
6008 	else if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_2_5GB)
6009 		speeds = PCI_EXP_LNKCAP2_SLS_2_5GB;
6010 
6011 	return speeds;
6012 }
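
/*
 * Illustrative sketch (not part of the original file): the returned
 * vector keeps the LNKCAP2 SLS bit layout, so individual speeds can be
 * tested with the existing defines:
 *
 *	u8 speeds = pcie_get_supported_speeds(pdev);
 *
 *	if (speeds & PCI_EXP_LNKCAP2_SLS_16_0GB)
 *		... 16.0 GT/s is supported ...
 */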
6013 
6014 /**
6015  * pcie_get_speed_cap - query for the PCI device's link speed capability
6016  * @dev: PCI device to query
6017  *
6018  * Query the PCI device speed capability.
6019  *
6020  * Return: the maximum link speed supported by the device.
6021  */
pcie_get_speed_cap(struct pci_dev * dev)6022 enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev)
6023 {
6024 	return PCIE_LNKCAP2_SLS2SPEED(dev->supported_speeds);
6025 }
6026 EXPORT_SYMBOL(pcie_get_speed_cap);
6027 
6028 /**
6029  * pcie_get_width_cap - query for the PCI device's link width capability
6030  * @dev: PCI device to query
6031  *
6032  * Query the PCI device width capability.  Return the maximum link width
6033  * supported by the device.
6034  */
pcie_get_width_cap(struct pci_dev * dev)6035 enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev)
6036 {
6037 	u32 lnkcap;
6038 
6039 	pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
6040 	if (lnkcap)
6041 		return FIELD_GET(PCI_EXP_LNKCAP_MLW, lnkcap);
6042 
6043 	return PCIE_LNK_WIDTH_UNKNOWN;
6044 }
6045 EXPORT_SYMBOL(pcie_get_width_cap);
6046 
6047 /**
6048  * pcie_bandwidth_capable - calculate a PCI device's link bandwidth capability
6049  * @dev: PCI device
6050  * @speed: storage for link speed
6051  * @width: storage for link width
6052  *
6053  * Calculate a PCI device's link bandwidth by querying for its link speed
6054  * and width, multiplying them, and applying encoding overhead.  The result
6055  * is in Mb/s, i.e., megabits/second of raw bandwidth.
6056  */
pcie_bandwidth_capable(struct pci_dev * dev,enum pci_bus_speed * speed,enum pcie_link_width * width)6057 static u32 pcie_bandwidth_capable(struct pci_dev *dev,
6058 				  enum pci_bus_speed *speed,
6059 				  enum pcie_link_width *width)
6060 {
6061 	*speed = pcie_get_speed_cap(dev);
6062 	*width = pcie_get_width_cap(dev);
6063 
6064 	if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN)
6065 		return 0;
6066 
6067 	return *width * PCIE_SPEED2MBS_ENC(*speed);
6068 }
6069 
6070 /**
6071  * __pcie_print_link_status - Report the PCI device's link speed and width
6072  * @dev: PCI device to query
6073  * @verbose: Print info even when enough bandwidth is available
6074  *
6075  * If the available bandwidth at the device is less than the device is
6076  * capable of, report the device's maximum possible bandwidth and the
6077  * upstream link that limits its performance.  If @verbose, always print
6078  * the available bandwidth, even if the device isn't constrained.
6079  */
__pcie_print_link_status(struct pci_dev * dev,bool verbose)6080 void __pcie_print_link_status(struct pci_dev *dev, bool verbose)
6081 {
6082 	enum pcie_link_width width, width_cap;
6083 	enum pci_bus_speed speed, speed_cap;
6084 	struct pci_dev *limiting_dev = NULL;
6085 	u32 bw_avail, bw_cap;
6086 	char *flit_mode = "";
6087 
6088 	bw_cap = pcie_bandwidth_capable(dev, &speed_cap, &width_cap);
6089 	bw_avail = pcie_bandwidth_available(dev, &limiting_dev, &speed, &width);
6090 
6091 	if (dev->bus && dev->bus->flit_mode)
6092 		flit_mode = ", in Flit mode";
6093 
6094 	if (bw_avail >= bw_cap && verbose)
6095 		pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth (%s x%d link)%s\n",
6096 			 bw_cap / 1000, bw_cap % 1000,
6097 			 pci_speed_string(speed_cap), width_cap, flit_mode);
6098 	else if (bw_avail < bw_cap)
6099 		pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth, limited by %s x%d link at %s (capable of %u.%03u Gb/s with %s x%d link)%s\n",
6100 			 bw_avail / 1000, bw_avail % 1000,
6101 			 pci_speed_string(speed), width,
6102 			 limiting_dev ? pci_name(limiting_dev) : "<unknown>",
6103 			 bw_cap / 1000, bw_cap % 1000,
6104 			 pci_speed_string(speed_cap), width_cap, flit_mode);
6105 }
6106 
6107 /**
6108  * pcie_print_link_status - Report the PCI device's link speed and width
6109  * @dev: PCI device to query
6110  *
6111  * Report the available bandwidth at the device.
6112  */
pcie_print_link_status(struct pci_dev * dev)6113 void pcie_print_link_status(struct pci_dev *dev)
6114 {
6115 	__pcie_print_link_status(dev, true);
6116 }
6117 EXPORT_SYMBOL(pcie_print_link_status);
6118 
6119 /**
6120  * pci_select_bars - Make BAR mask from the type of resource
6121  * @dev: the PCI device for which BAR mask is made
6122  * @flags: resource type mask to be selected
6123  *
6124  * This helper routine makes a BAR mask from the type of resource.
6125  */
pci_select_bars(struct pci_dev * dev,unsigned long flags)6126 int pci_select_bars(struct pci_dev *dev, unsigned long flags)
6127 {
6128 	int i, bars = 0;
6129 	for (i = 0; i < PCI_NUM_RESOURCES; i++)
6130 		if (pci_resource_flags(dev, i) & flags)
6131 			bars |= (1 << i);
6132 	return bars;
6133 }
6134 EXPORT_SYMBOL(pci_select_bars);
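
/*
 * Illustrative sketch (not part of the original file): the mask pairs
 * naturally with pci_request_selected_regions(); "mydrv" is a
 * hypothetical driver name:
 *
 *	int bars = pci_select_bars(pdev, IORESOURCE_MEM);
 *
 *	rc = pci_request_selected_regions(pdev, bars, "mydrv");
 */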
6135 
6136 /* Some architectures require additional programming to enable VGA */
6137 static arch_set_vga_state_t arch_set_vga_state;
6138 
pci_register_set_vga_state(arch_set_vga_state_t func)6139 void __init pci_register_set_vga_state(arch_set_vga_state_t func)
6140 {
6141 	arch_set_vga_state = func;	/* NULL disables */
6142 }
6143 
pci_set_vga_state_arch(struct pci_dev * dev,bool decode,unsigned int command_bits,u32 flags)6144 static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
6145 				  unsigned int command_bits, u32 flags)
6146 {
6147 	if (arch_set_vga_state)
6148 		return arch_set_vga_state(dev, decode, command_bits,
6149 						flags);
6150 	return 0;
6151 }
6152 
6153 /**
6154  * pci_set_vga_state - set VGA decode state on device and parents if requested
6155  * @dev: the PCI device
6156  * @decode: true = enable decoding, false = disable decoding
6157  * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY
6158  * @flags: traverse ancestors and change bridges
6159  * (PCI_VGA_STATE_CHANGE_DECODES and/or PCI_VGA_STATE_CHANGE_BRIDGE)
6160  */
pci_set_vga_state(struct pci_dev * dev,bool decode,unsigned int command_bits,u32 flags)6161 int pci_set_vga_state(struct pci_dev *dev, bool decode,
6162 		      unsigned int command_bits, u32 flags)
6163 {
6164 	struct pci_bus *bus;
6165 	struct pci_dev *bridge;
6166 	u16 cmd;
6167 	int rc;
6168 
6169 	WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) && (command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY)));
6170 
6171 	/* ARCH specific VGA enables */
6172 	rc = pci_set_vga_state_arch(dev, decode, command_bits, flags);
6173 	if (rc)
6174 		return rc;
6175 
6176 	if (flags & PCI_VGA_STATE_CHANGE_DECODES) {
6177 		pci_read_config_word(dev, PCI_COMMAND, &cmd);
6178 		if (decode)
6179 			cmd |= command_bits;
6180 		else
6181 			cmd &= ~command_bits;
6182 		pci_write_config_word(dev, PCI_COMMAND, cmd);
6183 	}
6184 
6185 	if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE))
6186 		return 0;
6187 
6188 	bus = dev->bus;
6189 	while (bus) {
6190 		bridge = bus->self;
6191 		if (bridge) {
6192 			pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
6193 					     &cmd);
6194 			if (decode)
6195 				cmd |= PCI_BRIDGE_CTL_VGA;
6196 			else
6197 				cmd &= ~PCI_BRIDGE_CTL_VGA;
6198 			pci_write_config_word(bridge, PCI_BRIDGE_CONTROL,
6199 					      cmd);
6200 		}
6201 		bus = bus->parent;
6202 	}
6203 	return 0;
6204 }
6205 
6206 #ifdef CONFIG_ACPI
pci_pr3_present(struct pci_dev * pdev)6207 bool pci_pr3_present(struct pci_dev *pdev)
6208 {
6209 	struct acpi_device *adev;
6210 
6211 	if (acpi_disabled)
6212 		return false;
6213 
6214 	adev = ACPI_COMPANION(&pdev->dev);
6215 	if (!adev)
6216 		return false;
6217 
6218 	return adev->power.flags.power_resources &&
6219 		acpi_has_method(adev->handle, "_PR3");
6220 }
6221 EXPORT_SYMBOL_GPL(pci_pr3_present);
6222 #endif
6223 
6224 /**
6225  * pci_add_dma_alias - Add a DMA devfn alias for a device
6226  * @dev: the PCI device for which alias is added
6227  * @devfn_from: alias slot and function
6228  * @nr_devfns: number of subsequent devfns to alias
6229  *
6230  * This helper encodes an 8-bit devfn as a bit number in dma_alias_mask
6231  * which is used to program permissible bus-devfn source addresses for DMA
6232  * requests in an IOMMU.  These aliases factor into IOMMU group creation
6233  * and are useful for devices generating DMA requests beyond or different
6234  * from their logical bus-devfn.  Examples include device quirks where the
6235  * device simply uses the wrong devfn, as well as non-transparent bridges
6236  * where the alias may be a proxy for devices in another domain.
6237  *
6238  * IOMMU group creation is performed during device discovery or addition,
6239  * prior to any potential DMA mapping and therefore prior to driver probing
6240  * (especially for userspace assigned devices where IOMMU group definition
6241  * cannot be left as a userspace activity).  DMA aliases should therefore
6242  * be configured via quirks, such as the PCI fixup header quirk.
6243  */
pci_add_dma_alias(struct pci_dev * dev,u8 devfn_from,unsigned int nr_devfns)6244 void pci_add_dma_alias(struct pci_dev *dev, u8 devfn_from,
6245 		       unsigned int nr_devfns)
6246 {
6247 	int devfn_to;
6248 
6249 	nr_devfns = min(nr_devfns, (unsigned int)MAX_NR_DEVFNS - devfn_from);
6250 	devfn_to = devfn_from + nr_devfns - 1;
6251 
6252 	if (!dev->dma_alias_mask)
6253 		dev->dma_alias_mask = bitmap_zalloc(MAX_NR_DEVFNS, GFP_KERNEL);
6254 	if (!dev->dma_alias_mask) {
6255 		pci_warn(dev, "Unable to allocate DMA alias mask\n");
6256 		return;
6257 	}
6258 
6259 	bitmap_set(dev->dma_alias_mask, devfn_from, nr_devfns);
6260 
6261 	if (nr_devfns == 1)
6262 		pci_info(dev, "Enabling fixed DMA alias to %02x.%d\n",
6263 				PCI_SLOT(devfn_from), PCI_FUNC(devfn_from));
6264 	else if (nr_devfns > 1)
6265 		pci_info(dev, "Enabling fixed DMA alias for devfn range from %02x.%d to %02x.%d\n",
6266 				PCI_SLOT(devfn_from), PCI_FUNC(devfn_from),
6267 				PCI_SLOT(devfn_to), PCI_FUNC(devfn_to));
6268 }
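
/*
 * Illustrative sketch (not part of the original file): a header fixup
 * quirk for a device that issues DMA as function 1 regardless of its
 * own devfn (the vendor/device IDs are placeholders):
 *
 *	static void quirk_dma_func1_alias(struct pci_dev *dev)
 *	{
 *		if (PCI_FUNC(dev->devfn) != 1)
 *			pci_add_dma_alias(dev,
 *				PCI_DEVFN(PCI_SLOT(dev->devfn), 1), 1);
 *	}
 *	DECLARE_PCI_FIXUP_HEADER(0x1234, 0x5678, quirk_dma_func1_alias);
 */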
6269 
pci_devs_are_dma_aliases(struct pci_dev * dev1,struct pci_dev * dev2)6270 bool pci_devs_are_dma_aliases(struct pci_dev *dev1, struct pci_dev *dev2)
6271 {
6272 	return (dev1->dma_alias_mask &&
6273 		test_bit(dev2->devfn, dev1->dma_alias_mask)) ||
6274 	       (dev2->dma_alias_mask &&
6275 		test_bit(dev1->devfn, dev2->dma_alias_mask)) ||
6276 	       pci_real_dma_dev(dev1) == dev2 ||
6277 	       pci_real_dma_dev(dev2) == dev1;
6278 }
6279 
pci_device_is_present(struct pci_dev * pdev)6280 bool pci_device_is_present(struct pci_dev *pdev)
6281 {
6282 	u32 v;
6283 
6284 	/* Check PF if pdev is a VF, since VF Vendor/Device IDs are 0xffff */
6285 	pdev = pci_physfn(pdev);
6286 	if (pci_dev_is_disconnected(pdev))
6287 		return false;
6288 	return pci_bus_read_dev_vendor_id(pdev->bus, pdev->devfn, &v, 0);
6289 }
6290 EXPORT_SYMBOL_GPL(pci_device_is_present);
6291 
pci_ignore_hotplug(struct pci_dev * dev)6292 void pci_ignore_hotplug(struct pci_dev *dev)
6293 {
6294 	struct pci_dev *bridge = dev->bus->self;
6295 
6296 	dev->ignore_hotplug = 1;
6297 	/* Propagate the "ignore hotplug" setting to the parent bridge. */
6298 	if (bridge)
6299 		bridge->ignore_hotplug = 1;
6300 }
6301 EXPORT_SYMBOL_GPL(pci_ignore_hotplug);
6302 
6303 /**
6304  * pci_real_dma_dev - Get PCI DMA device for PCI device
6305  * @dev: the PCI device that may have a PCI DMA alias
6306  *
6307  * Permits the platform to provide architecture-specific functionality to
6308  * devices needing to alias DMA to another PCI device on another PCI bus. If
6309  * the PCI device is on the same bus, it is recommended to use
6310  * pci_add_dma_alias(). This is the default implementation. Architecture
6311  * implementations can override this.
6312  */
pci_real_dma_dev(struct pci_dev * dev)6313 struct pci_dev __weak *pci_real_dma_dev(struct pci_dev *dev)
6314 {
6315 	return dev;
6316 }
6317 
pcibios_default_alignment(void)6318 resource_size_t __weak pcibios_default_alignment(void)
6319 {
6320 	return 0;
6321 }
6322 
6323 /*
6324  * Arches that don't want to expose struct resource to userland as-is in
6325  * sysfs and /proc can implement their own pci_resource_to_user().
6326  */
pci_resource_to_user(const struct pci_dev * dev,int bar,const struct resource * rsrc,resource_size_t * start,resource_size_t * end)6327 void __weak pci_resource_to_user(const struct pci_dev *dev, int bar,
6328 				 const struct resource *rsrc,
6329 				 resource_size_t *start, resource_size_t *end)
6330 {
6331 	*start = rsrc->start;
6332 	*end = rsrc->end;
6333 }
6334 
6335 static char *resource_alignment_param;
6336 static DEFINE_SPINLOCK(resource_alignment_lock);
6337 
6338 /**
6339  * pci_specified_resource_alignment - get resource alignment specified by user.
6340  * @dev: the PCI device to get
6341  * @resize: whether or not to change resources' size when reassigning alignment
6342  *
6343  * RETURNS: Resource alignment if it is specified.
6344  *          Zero if it is not specified.
6345  */
pci_specified_resource_alignment(struct pci_dev * dev,bool * resize)6346 static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev,
6347 							bool *resize)
6348 {
6349 	int align_order, count;
6350 	resource_size_t align = pcibios_default_alignment();
6351 	const char *p;
6352 	int ret;
6353 
6354 	spin_lock(&resource_alignment_lock);
6355 	p = resource_alignment_param;
6356 	if (!p || !*p)
6357 		goto out;
6358 	if (pci_has_flag(PCI_PROBE_ONLY)) {
6359 		align = 0;
6360 		pr_info_once("PCI: Ignoring requested alignments (PCI_PROBE_ONLY)\n");
6361 		goto out;
6362 	}
6363 
6364 	while (*p) {
6365 		count = 0;
6366 		if (sscanf(p, "%d%n", &align_order, &count) == 1 &&
6367 		    p[count] == '@') {
6368 			p += count + 1;
6369 			if (align_order > 63) {
6370 				pr_err("PCI: Invalid requested alignment (order %d)\n",
6371 				       align_order);
6372 				align_order = PAGE_SHIFT;
6373 			}
6374 		} else {
6375 			align_order = PAGE_SHIFT;
6376 		}
6377 
6378 		ret = pci_dev_str_match(dev, p, &p);
6379 		if (ret == 1) {
6380 			*resize = true;
6381 			align = 1ULL << align_order;
6382 			break;
6383 		} else if (ret < 0) {
6384 			pr_err("PCI: Can't parse resource_alignment parameter: %s\n",
6385 			       p);
6386 			break;
6387 		}
6388 
6389 		if (*p != ';' && *p != ',') {
6390 			/* End of param or invalid format */
6391 			break;
6392 		}
6393 		p++;
6394 	}
6395 out:
6396 	spin_unlock(&resource_alignment_lock);
6397 	return align;
6398 }
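
/*
 * Illustrative sketch (not part of the original file): the parameter is
 * a ';'- or ','-separated list of "[<order>@]<device match>" entries,
 * e.g. (the device ID below is a placeholder):
 *
 *	pci=resource_alignment=20@pci:8086:1234
 *
 * requests 2^20 (1 MB) alignment for matching devices; without the
 * "<order>@" prefix, PAGE_SHIFT alignment is assumed.
 */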
6399 
pci_request_resource_alignment(struct pci_dev * dev,int bar,resource_size_t align,bool resize)6400 static void pci_request_resource_alignment(struct pci_dev *dev, int bar,
6401 					   resource_size_t align, bool resize)
6402 {
6403 	struct resource *r = &dev->resource[bar];
6404 	const char *r_name = pci_resource_name(dev, bar);
6405 	resource_size_t size;
6406 
6407 	if (!(r->flags & IORESOURCE_MEM))
6408 		return;
6409 
6410 	if (r->flags & IORESOURCE_PCI_FIXED) {
6411 		pci_info(dev, "%s %pR: ignoring requested alignment %#llx\n",
6412 			 r_name, r, (unsigned long long)align);
6413 		return;
6414 	}
6415 
6416 	size = resource_size(r);
6417 	if (size >= align)
6418 		return;
6419 
6420 	/*
6421 	 * Increase the alignment of the resource.  There are two ways we
6422 	 * can do this:
6423 	 *
6424 	 * 1) Increase the size of the resource.  BARs are aligned on their
6425 	 *    size, so when we reallocate space for this resource, we'll
6426 	 *    allocate it with the larger alignment.  This also prevents
6427 	 *    assignment of any other BARs inside the alignment region, so
6428 	 *    if we're requesting page alignment, this means no other BARs
6429 	 *    will share the page.
6430 	 *
6431 	 *    The disadvantage is that this makes the resource larger than
6432 	 *    the hardware BAR, which may break drivers that compute things
6433 	 *    based on the resource size, e.g., to find registers at a
6434 	 *    fixed offset before the end of the BAR.
6435 	 *
6436 	 * 2) Retain the resource size, but use IORESOURCE_STARTALIGN and
6437 	 *    set r->start to the desired alignment.  By itself this
6438 	 *    doesn't prevent other BARs being put inside the alignment
6439 	 *    region, but if we realign *every* resource of every device in
6440 	 *    the system, none of them will share an alignment region.
6441 	 *
6442 	 * When the user has requested alignment for only some devices via
6443 	 * the "pci=resource_alignment" argument, "resize" is true and we
6444 	 * use the first method.  Otherwise we assume we're aligning all
6445 	 * devices and we use the second.
6446 	 */
6447 
6448 	pci_info(dev, "%s %pR: requesting alignment to %#llx\n",
6449 		 r_name, r, (unsigned long long)align);
6450 
6451 	if (resize) {
6452 		r->start = 0;
6453 		r->end = align - 1;
6454 	} else {
6455 		r->flags &= ~IORESOURCE_SIZEALIGN;
6456 		r->flags |= IORESOURCE_STARTALIGN;
6457 		resource_set_range(r, align, size);
6458 	}
6459 	r->flags |= IORESOURCE_UNSET;
6460 }
6461 
6462 /*
6463  * This function disables memory decoding and releases memory resources
6464  * of the device specified by the kernel's boot parameter 'pci=resource_alignment='.
6465  * It also rounds up the size to the specified alignment.
6466  * Later on, the kernel will assign the page-aligned memory resource back
6467  * to the device.
6468  */
pci_reassigndev_resource_alignment(struct pci_dev * dev)6469 void pci_reassigndev_resource_alignment(struct pci_dev *dev)
6470 {
6471 	int i;
6472 	struct resource *r;
6473 	resource_size_t align;
6474 	u16 command;
6475 	bool resize = false;
6476 
6477 	/*
6478 	 * VF BARs are read-only zero according to SR-IOV spec r1.1, sec
6479 	 * 3.4.1.11.  Their resources are allocated from the space
6480 	 * described by the VF BARx register in the PF's SR-IOV capability.
6481 	 * We can't influence their alignment here.
6482 	 */
6483 	if (dev->is_virtfn)
6484 		return;
6485 
6486 	/* check if specified PCI is target device to reassign */
6487 	align = pci_specified_resource_alignment(dev, &resize);
6488 	if (!align)
6489 		return;
6490 
6491 	if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
6492 	    (dev->class >> 8) == PCI_CLASS_BRIDGE_HOST) {
6493 		pci_warn(dev, "Can't reassign resources to host bridge\n");
6494 		return;
6495 	}
6496 
6497 	pci_read_config_word(dev, PCI_COMMAND, &command);
6498 	command &= ~PCI_COMMAND_MEMORY;
6499 	pci_write_config_word(dev, PCI_COMMAND, command);
6500 
6501 	for (i = 0; i <= PCI_ROM_RESOURCE; i++)
6502 		pci_request_resource_alignment(dev, i, align, resize);
6503 
6504 	/*
6505 	 * Need to disable the bridge's resource window
6506 	 * so that the kernel can reassign a new resource
6507 	 * window later on.
6508 	 */
6509 	if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
6510 		for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
6511 			r = &dev->resource[i];
6512 			if (!(r->flags & IORESOURCE_MEM))
6513 				continue;
6514 			r->flags |= IORESOURCE_UNSET;
6515 			r->end = resource_size(r) - 1;
6516 			r->start = 0;
6517 		}
6518 		pci_disable_bridge_window(dev);
6519 	}
6520 }
6521 
resource_alignment_show(const struct bus_type * bus,char * buf)6522 static ssize_t resource_alignment_show(const struct bus_type *bus, char *buf)
6523 {
6524 	size_t count = 0;
6525 
6526 	spin_lock(&resource_alignment_lock);
6527 	if (resource_alignment_param)
6528 		count = sysfs_emit(buf, "%s\n", resource_alignment_param);
6529 	spin_unlock(&resource_alignment_lock);
6530 
6531 	return count;
6532 }
6533 
resource_alignment_store(const struct bus_type * bus,const char * buf,size_t count)6534 static ssize_t resource_alignment_store(const struct bus_type *bus,
6535 					const char *buf, size_t count)
6536 {
6537 	char *param, *old, *end;
6538 
6539 	if (count >= (PAGE_SIZE - 1))
6540 		return -EINVAL;
6541 
6542 	param = kstrndup(buf, count, GFP_KERNEL);
6543 	if (!param)
6544 		return -ENOMEM;
6545 
6546 	end = strchr(param, '\n');
6547 	if (end)
6548 		*end = '\0';
6549 
6550 	spin_lock(&resource_alignment_lock);
6551 	old = resource_alignment_param;
6552 	if (strlen(param)) {
6553 		resource_alignment_param = param;
6554 	} else {
6555 		kfree(param);
6556 		resource_alignment_param = NULL;
6557 	}
6558 	spin_unlock(&resource_alignment_lock);
6559 
6560 	kfree(old);
6561 
6562 	return count;
6563 }
6564 
6565 static BUS_ATTR_RW(resource_alignment);
6566 
pci_resource_alignment_sysfs_init(void)6567 static int __init pci_resource_alignment_sysfs_init(void)
6568 {
6569 	return bus_create_file(&pci_bus_type,
6570 					&bus_attr_resource_alignment);
6571 }
6572 late_initcall(pci_resource_alignment_sysfs_init);
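
/*
 * Illustrative sketch (not part of the original file): the same
 * parameter string can also be updated at run time via sysfs (the
 * device ID below is a placeholder):
 *
 *	echo "20@pci:8086:1234" > /sys/bus/pci/resource_alignment
 */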
6573 
pci_no_domains(void)6574 static void pci_no_domains(void)
6575 {
6576 #ifdef CONFIG_PCI_DOMAINS
6577 	pci_domains_supported = 0;
6578 #endif
6579 }
6580 
6581 #ifdef CONFIG_PCI_DOMAINS
6582 static DEFINE_IDA(pci_domain_nr_dynamic_ida);
6583 
6584 /**
6585  * pci_bus_find_emul_domain_nr() - allocate a PCI domain number per constraints
6586  * @hint: desired domain, 0 if any ID in the range of @min to @max is acceptable
6587  * @min: minimum allowable domain
6588  * @max: maximum allowable domain, no IDs higher than INT_MAX will be returned
6589  */
pci_bus_find_emul_domain_nr(u32 hint,u32 min,u32 max)6590 int pci_bus_find_emul_domain_nr(u32 hint, u32 min, u32 max)
6591 {
6592 	return ida_alloc_range(&pci_domain_nr_dynamic_ida, max(hint, min), max,
6593 			       GFP_KERNEL);
6594 }
6595 EXPORT_SYMBOL_GPL(pci_bus_find_emul_domain_nr);
6596 
pci_bus_release_emul_domain_nr(int domain_nr)6597 void pci_bus_release_emul_domain_nr(int domain_nr)
6598 {
6599 	ida_free(&pci_domain_nr_dynamic_ida, domain_nr);
6600 }
6601 EXPORT_SYMBOL_GPL(pci_bus_release_emul_domain_nr);
6602 #endif
6603 
6604 #ifdef CONFIG_PCI_DOMAINS_GENERIC
6605 static DEFINE_IDA(pci_domain_nr_static_ida);
6606 
of_pci_reserve_static_domain_nr(void)6607 static void of_pci_reserve_static_domain_nr(void)
6608 {
6609 	struct device_node *np;
6610 	int domain_nr;
6611 
6612 	for_each_node_by_type(np, "pci") {
6613 		domain_nr = of_get_pci_domain_nr(np);
6614 		if (domain_nr < 0)
6615 			continue;
6616 		/*
6617 		 * Permanently allocate domain_nr in dynamic_ida
6618 		 * to prevent it from being allocated dynamically.
6619 		 */
6620 		ida_alloc_range(&pci_domain_nr_dynamic_ida,
6621 				domain_nr, domain_nr, GFP_KERNEL);
6622 	}
6623 }
6624 
of_pci_bus_find_domain_nr(struct device * parent)6625 static int of_pci_bus_find_domain_nr(struct device *parent)
6626 {
6627 	static bool static_domains_reserved = false;
6628 	int domain_nr;
6629 
6630 	/* On the first call scan device tree for static allocations. */
6631 	if (!static_domains_reserved) {
6632 		of_pci_reserve_static_domain_nr();
6633 		static_domains_reserved = true;
6634 	}
6635 
6636 	if (parent) {
6637 		/*
6638 		 * If domain is in DT, allocate it in static IDA.  This
6639 		 * prevents duplicate static allocations in case of errors
6640 		 * in DT.
6641 		 */
6642 		domain_nr = of_get_pci_domain_nr(parent->of_node);
6643 		if (domain_nr >= 0)
6644 			return ida_alloc_range(&pci_domain_nr_static_ida,
6645 					       domain_nr, domain_nr,
6646 					       GFP_KERNEL);
6647 	}
6648 
6649 	/*
6650 	 * If the domain was not specified in DT, choose a free ID from the
6651 	 * dynamic allocations.  All domain numbers from DT are permanently
6652 	 * reserved in the dynamic IDA to prevent them from being assigned to
6653 	 * other DT nodes that lack a static domain.
6654 	 */
6655 	return ida_alloc(&pci_domain_nr_dynamic_ida, GFP_KERNEL);
6656 }
6657 
of_pci_bus_release_domain_nr(struct device * parent,int domain_nr)6658 static void of_pci_bus_release_domain_nr(struct device *parent, int domain_nr)
6659 {
6660 	if (domain_nr < 0)
6661 		return;
6662 
6663 	/* Release domain from IDA where it was allocated. */
6664 	if (parent && of_get_pci_domain_nr(parent->of_node) == domain_nr)
6665 		ida_free(&pci_domain_nr_static_ida, domain_nr);
6666 	else
6667 		ida_free(&pci_domain_nr_dynamic_ida, domain_nr);
6668 }
6669 
pci_bus_find_domain_nr(struct pci_bus * bus,struct device * parent)6670 int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent)
6671 {
6672 	return acpi_disabled ? of_pci_bus_find_domain_nr(parent) :
6673 			       acpi_pci_bus_find_domain_nr(bus);
6674 }
6675 
pci_bus_release_domain_nr(struct device * parent,int domain_nr)6676 void pci_bus_release_domain_nr(struct device *parent, int domain_nr)
6677 {
6678 	if (!acpi_disabled)
6679 		return;
6680 	of_pci_bus_release_domain_nr(parent, domain_nr);
6681 }
6682 #endif
6683 
6684 /**
6685  * pci_ext_cfg_avail - can we access extended PCI config space?
6686  *
6687  * Returns 1 if we can access PCI extended config space (offsets
6688  * greater than 0xff). This is the default implementation. Architecture
6689  * implementations can override this.
6690  */
pci_ext_cfg_avail(void)6691 int __weak pci_ext_cfg_avail(void)
6692 {
6693 	return 1;
6694 }
6695 
pci_setup(char * str)6696 static int __init pci_setup(char *str)
6697 {
6698 	while (str) {
6699 		char *k = strchr(str, ',');
6700 		if (k)
6701 			*k++ = 0;
6702 		if (*str && (str = pcibios_setup(str)) && *str) {
6703 			if (!pci_setup_cardbus(str)) {
6704 				/* Function handled the parameters */
6705 			} else if (!strcmp(str, "nomsi")) {
6706 				pci_no_msi();
6707 			} else if (!strncmp(str, "noats", 5)) {
6708 				pr_info("PCIe: ATS is disabled\n");
6709 				pcie_ats_disabled = true;
6710 			} else if (!strcmp(str, "noaer")) {
6711 				pci_no_aer();
6712 			} else if (!strcmp(str, "earlydump")) {
6713 				pci_early_dump = true;
6714 			} else if (!strncmp(str, "realloc=", 8)) {
6715 				pci_realloc_get_opt(str + 8);
6716 			} else if (!strncmp(str, "realloc", 7)) {
6717 				pci_realloc_get_opt("on");
6718 			} else if (!strcmp(str, "nodomains")) {
6719 				pci_no_domains();
6720 			} else if (!strncmp(str, "noari", 5)) {
6721 				pcie_ari_disabled = true;
6722 			} else if (!strncmp(str, "notph", 5)) {
6723 				pci_no_tph();
6724 			} else if (!strncmp(str, "resource_alignment=", 19)) {
6725 				resource_alignment_param = str + 19;
6726 			} else if (!strncmp(str, "ecrc=", 5)) {
6727 				pcie_ecrc_get_policy(str + 5);
6728 			} else if (!strncmp(str, "hpiosize=", 9)) {
6729 				pci_hotplug_io_size = memparse(str + 9, &str);
6730 			} else if (!strncmp(str, "hpmmiosize=", 11)) {
6731 				pci_hotplug_mmio_size = memparse(str + 11, &str);
6732 			} else if (!strncmp(str, "hpmmioprefsize=", 15)) {
6733 				pci_hotplug_mmio_pref_size = memparse(str + 15, &str);
6734 			} else if (!strncmp(str, "hpmemsize=", 10)) {
6735 				pci_hotplug_mmio_size = memparse(str + 10, &str);
6736 				pci_hotplug_mmio_pref_size = pci_hotplug_mmio_size;
6737 			} else if (!strncmp(str, "hpbussize=", 10)) {
6738 				pci_hotplug_bus_size =
6739 					simple_strtoul(str + 10, &str, 0);
6740 				if (pci_hotplug_bus_size > 0xff)
6741 					pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE;
6742 			} else if (!strncmp(str, "pcie_bus_tune_off", 17)) {
6743 				pcie_bus_config = PCIE_BUS_TUNE_OFF;
6744 			} else if (!strncmp(str, "pcie_bus_safe", 13)) {
6745 				pcie_bus_config = PCIE_BUS_SAFE;
6746 			} else if (!strncmp(str, "pcie_bus_perf", 13)) {
6747 				pcie_bus_config = PCIE_BUS_PERFORMANCE;
6748 			} else if (!strncmp(str, "pcie_bus_peer2peer", 18)) {
6749 				pcie_bus_config = PCIE_BUS_PEER2PEER;
6750 			} else if (!strncmp(str, "pcie_scan_all", 13)) {
6751 				pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
6752 			} else if (!strncmp(str, "disable_acs_redir=", 18)) {
6753 				disable_acs_redir_param = str + 18;
6754 			} else if (!strncmp(str, "config_acs=", 11)) {
6755 				config_acs_param = str + 11;
6756 			} else {
6757 				pr_err("PCI: Unknown option `%s'\n", str);
6758 			}
6759 		}
6760 		str = k;
6761 	}
6762 	return 0;
6763 }
6764 early_param("pci", pci_setup);
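
/*
 * Illustrative sketch (not part of the original file): options combine
 * as a comma-separated list on the kernel command line, e.g.:
 *
 *	pci=hpmemsize=128M,pcie_bus_safe,noats
 */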
6765 
6766 /*
6767  * 'resource_alignment_param' and 'disable_acs_redir_param' are initialized
6768  * in pci_setup(), above, to point to data in the __initdata section which
6769  * will be freed after the init sequence is complete. We can't allocate memory
6770  * in pci_setup() because some architectures do not have any memory allocation
6771  * service available during an early_param() call. So we allocate memory and
6772  * copy the variable here before the init section is freed.
6774  */
pci_realloc_setup_params(void)6775 static int __init pci_realloc_setup_params(void)
6776 {
6777 	resource_alignment_param = kstrdup(resource_alignment_param,
6778 					   GFP_KERNEL);
6779 	disable_acs_redir_param = kstrdup(disable_acs_redir_param, GFP_KERNEL);
6780 	config_acs_param = kstrdup(config_acs_param, GFP_KERNEL);
6781 
6782 	return 0;
6783 }
6784 pure_initcall(pci_realloc_setup_params);
6785