// SPDX-License-Identifier: GPL-2.0
/*
 * PCI Bus Services, see include/linux/pci.h for further explanation.
 *
 * Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
 * David Mosberger-Tang
 *
 * Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz>
 */

#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/dmi.h>
#include <linux/init.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/log2.h>
#include <linux/logic_pio.h>
#include <linux/pm_wakeup.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <linux/pci_hotplug.h>
#include <linux/vmalloc.h>
#include <asm/dma.h>
#include <linux/aer.h>
#include <linux/bitfield.h>
#include "pci.h"

DEFINE_MUTEX(pci_slot_mutex);

const char *pci_power_names[] = {
	"error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown",
};
EXPORT_SYMBOL_GPL(pci_power_names);

#ifdef CONFIG_X86_32
int isa_dma_bridge_buggy;
EXPORT_SYMBOL(isa_dma_bridge_buggy);
#endif

int pci_pci_problems;
EXPORT_SYMBOL(pci_pci_problems);

unsigned int pci_pm_d3hot_delay;

static void pci_pme_list_scan(struct work_struct *work);

static LIST_HEAD(pci_pme_list);
static DEFINE_MUTEX(pci_pme_list_mutex);
static DECLARE_DELAYED_WORK(pci_pme_work, pci_pme_list_scan);

struct pci_pme_device {
	struct list_head list;
	struct pci_dev *dev;
};

#define PME_TIMEOUT 1000 /* How long between PME checks */

/*
 * Following exit from Conventional Reset, devices must be ready within 1 sec
 * (PCIe r6.0 sec 6.6.1). A D3cold to D0 transition implies a Conventional
 * Reset (PCIe r6.0 sec 5.8).
 */
#define PCI_RESET_WAIT 1000 /* msec */

/*
 * Devices may extend the 1 sec period through Request Retry Status
 * completions (PCIe r6.0 sec 2.3.1). The spec does not provide an upper
 * limit, but 60 sec ought to be enough for any device to become
 * responsive.
 */
#define PCIE_RESET_READY_POLL_MS 60000 /* msec */

static void pci_dev_d3_sleep(struct pci_dev *dev)
{
	unsigned int delay_ms = max(dev->d3hot_delay, pci_pm_d3hot_delay);
	unsigned int upper;

	if (delay_ms) {
		/* Use a 20% upper bound, 1ms minimum */
		upper = max(DIV_ROUND_CLOSEST(delay_ms, 5), 1U);
		usleep_range(delay_ms * USEC_PER_MSEC,
			     (delay_ms + upper) * USEC_PER_MSEC);
	}
}
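
/*
 * Worked example (illustrative): with delay_ms == 10 the range above is
 * 10000..12000 us, i.e. 10 ms plus the 20% (2 ms) upper bound.
 */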

bool pci_reset_supported(struct pci_dev *dev)
{
	return dev->reset_methods[0] != 0;
}

#ifdef CONFIG_PCI_DOMAINS
int pci_domains_supported = 1;
#endif

#define DEFAULT_CARDBUS_IO_SIZE		(256)
#define DEFAULT_CARDBUS_MEM_SIZE	(64*1024*1024)
/* pci=cbmemsize=nnM,cbiosize=nn can override this */
unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE;
unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;

#define DEFAULT_HOTPLUG_IO_SIZE		(256)
#define DEFAULT_HOTPLUG_MMIO_SIZE	(2*1024*1024)
#define DEFAULT_HOTPLUG_MMIO_PREF_SIZE	(2*1024*1024)
/* hpiosize=nn can override this */
unsigned long pci_hotplug_io_size = DEFAULT_HOTPLUG_IO_SIZE;
/*
 * pci=hpmmiosize=nnM overrides non-prefetchable MMIO size,
 * pci=hpmmioprefsize=nnM overrides prefetchable MMIO size;
 * pci=hpmemsize=nnM overrides both
 */
unsigned long pci_hotplug_mmio_size = DEFAULT_HOTPLUG_MMIO_SIZE;
unsigned long pci_hotplug_mmio_pref_size = DEFAULT_HOTPLUG_MMIO_PREF_SIZE;

#define DEFAULT_HOTPLUG_BUS_SIZE	1
unsigned long pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE;


/* PCIe MPS/MRRS strategy; can be overridden by kernel command-line param */
#ifdef CONFIG_PCIE_BUS_TUNE_OFF
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_TUNE_OFF;
#elif defined CONFIG_PCIE_BUS_SAFE
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_SAFE;
#elif defined CONFIG_PCIE_BUS_PERFORMANCE
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_PERFORMANCE;
#elif defined CONFIG_PCIE_BUS_PEER2PEER
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_PEER2PEER;
#else
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_DEFAULT;
#endif

/*
 * The default CLS is used if arch didn't set CLS explicitly and not
 * all pci devices agree on the same value. Arch can override either
 * the dfl or actual value as it sees fit. Don't forget this is
 * measured in 32-bit words, not bytes.
 */
u8 pci_dfl_cache_line_size __ro_after_init = L1_CACHE_BYTES >> 2;
u8 pci_cache_line_size __ro_after_init;

/*
 * If we set up a device for bus mastering, we need to check the latency
 * timer as certain BIOSes forget to set it properly.
 */
unsigned int pcibios_max_latency = 255;

/* If set, the PCIe ARI capability will not be used. */
static bool pcie_ari_disabled;

/* If set, the PCIe ATS capability will not be used. */
static bool pcie_ats_disabled;

/* If set, the PCI config space of each device is printed during boot. */
bool pci_early_dump;

bool pci_ats_disabled(void)
{
	return pcie_ats_disabled;
}
EXPORT_SYMBOL_GPL(pci_ats_disabled);

/* Disable bridge_d3 for all PCIe ports */
static bool pci_bridge_d3_disable;
/* Force bridge_d3 for all PCIe ports */
static bool pci_bridge_d3_force;

static int __init pcie_port_pm_setup(char *str)
{
	if (!strcmp(str, "off"))
		pci_bridge_d3_disable = true;
	else if (!strcmp(str, "force"))
		pci_bridge_d3_force = true;
	return 1;
}
__setup("pcie_port_pm=", pcie_port_pm_setup);

/**
 * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
 * @bus: pointer to PCI bus structure to search
 *
 * Given a PCI bus, returns the highest PCI bus number present in the set
 * including the given PCI bus and its list of child PCI buses.
 */
unsigned char pci_bus_max_busnr(struct pci_bus *bus)
{
	struct pci_bus *tmp;
	unsigned char max, n;

	max = bus->busn_res.end;
	list_for_each_entry(tmp, &bus->children, node) {
		n = pci_bus_max_busnr(tmp);
		if (n > max)
			max = n;
	}
	return max;
}
EXPORT_SYMBOL_GPL(pci_bus_max_busnr);

/**
 * pci_status_get_and_clear_errors - return and clear error bits in PCI_STATUS
 * @pdev: the PCI device
 *
 * Returns error bits set in PCI_STATUS and clears them.
 */
int pci_status_get_and_clear_errors(struct pci_dev *pdev)
{
	u16 status;
	int ret;

	ret = pci_read_config_word(pdev, PCI_STATUS, &status);
	if (ret != PCIBIOS_SUCCESSFUL)
		return -EIO;

	status &= PCI_STATUS_ERROR_BITS;
	if (status)
		pci_write_config_word(pdev, PCI_STATUS, status);

	return status;
}
EXPORT_SYMBOL_GPL(pci_status_get_and_clear_errors);
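
/*
 * Illustrative use (a sketch, not code from this file): a driver may clear
 * stale error bits before retrying an operation:
 *
 *	int status = pci_status_get_and_clear_errors(pdev);
 *	if (status > 0 && (status & PCI_STATUS_DETECTED_PARITY))
 *		...;	// report/handle the parity error
 */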

#ifdef CONFIG_HAS_IOMEM
static void __iomem *__pci_ioremap_resource(struct pci_dev *pdev, int bar,
					    bool write_combine)
{
	struct resource *res = &pdev->resource[bar];
	resource_size_t start = res->start;
	resource_size_t size = resource_size(res);

	/*
	 * Make sure the BAR is actually a memory resource, not an IO resource
	 */
	if (res->flags & IORESOURCE_UNSET || !(res->flags & IORESOURCE_MEM)) {
		pci_err(pdev, "can't ioremap BAR %d: %pR\n", bar, res);
		return NULL;
	}

	if (write_combine)
		return ioremap_wc(start, size);

	return ioremap(start, size);
}

void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
{
	return __pci_ioremap_resource(pdev, bar, false);
}
EXPORT_SYMBOL_GPL(pci_ioremap_bar);

void __iomem *pci_ioremap_wc_bar(struct pci_dev *pdev, int bar)
{
	return __pci_ioremap_resource(pdev, bar, true);
}
EXPORT_SYMBOL_GPL(pci_ioremap_wc_bar);
#endif
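
/*
 * Typical use of the helpers above (illustrative): map BAR 0 from a
 * driver's probe routine.
 *
 *	void __iomem *regs = pci_ioremap_bar(pdev, 0);
 *	if (!regs)
 *		return -ENOMEM;
 */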

/**
 * pci_dev_str_match_path - test if a path string matches a device
 * @dev: the PCI device to test
 * @path: string to match the device against
 * @endptr: pointer to the string after the match
 *
 * Test if a string (typically from a kernel parameter) formatted as a
 * path of device/function addresses matches a PCI device. The string must
 * be of the form:
 *
 *   [<domain>:]<bus>:<device>.<func>[/<device>.<func>]*
 *
 * A path for a device can be obtained using 'lspci -t'. Using a path
 * is more robust against bus renumbering than using only a single bus,
 * device and function address.
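 *
 * For example, "0000:00:03.1/0.0" (an illustrative address) names function
 * 0 of device 0 on the bus below the bridge at 0000:00:03.1.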
 *
 * Returns 1 if the string matches the device, 0 if it does not and
 * a negative error code if it fails to parse the string.
 */
static int pci_dev_str_match_path(struct pci_dev *dev, const char *path,
				  const char **endptr)
{
	int ret;
	unsigned int seg, bus, slot, func;
	char *wpath, *p;
	char end;

	*endptr = strchrnul(path, ';');

	wpath = kmemdup_nul(path, *endptr - path, GFP_ATOMIC);
	if (!wpath)
		return -ENOMEM;

	while (1) {
		p = strrchr(wpath, '/');
		if (!p)
			break;
		ret = sscanf(p, "/%x.%x%c", &slot, &func, &end);
		if (ret != 2) {
			ret = -EINVAL;
			goto free_and_exit;
		}

		if (dev->devfn != PCI_DEVFN(slot, func)) {
			ret = 0;
			goto free_and_exit;
		}

		/*
		 * Note: we don't need to get a reference to the upstream
		 * bridge because we hold a reference to the top level
		 * device which should hold a reference to the bridge,
		 * and so on.
		 */
		dev = pci_upstream_bridge(dev);
		if (!dev) {
			ret = 0;
			goto free_and_exit;
		}

		*p = 0;
	}

	ret = sscanf(wpath, "%x:%x:%x.%x%c", &seg, &bus, &slot,
		     &func, &end);
	if (ret != 4) {
		seg = 0;
		ret = sscanf(wpath, "%x:%x.%x%c", &bus, &slot, &func, &end);
		if (ret != 3) {
			ret = -EINVAL;
			goto free_and_exit;
		}
	}

	ret = (seg == pci_domain_nr(dev->bus) &&
	       bus == dev->bus->number &&
	       dev->devfn == PCI_DEVFN(slot, func));

free_and_exit:
	kfree(wpath);
	return ret;
}

/**
 * pci_dev_str_match - test if a string matches a device
 * @dev: the PCI device to test
 * @p: string to match the device against
 * @endptr: pointer to the string after the match
 *
 * Test if a string (typically from a kernel parameter) matches a specified
 * PCI device. The string may be of one of the following formats:
 *
 *   [<domain>:]<bus>:<device>.<func>[/<device>.<func>]*
 *   pci:<vendor>:<device>[:<subvendor>:<subdevice>]
 *
 * The first format specifies a PCI bus/device/function address which
 * may change if new hardware is inserted, if motherboard firmware changes,
 * or due to changes caused by kernel parameters. If the domain is
 * left unspecified, it is taken to be 0. In order to be robust against
 * bus renumbering issues, a path of PCI device/function numbers may be used
 * to address the specific device. The path for a device can be determined
 * through the use of 'lspci -t'.
 *
 * The second format matches devices using IDs in the configuration
 * space which may match multiple devices in the system. A value of 0
 * for any field will match all devices. (Note: this differs from
 * in-kernel code that uses PCI_ANY_ID which is ~0; this is for
 * legacy reasons and convenience so users don't have to specify
 * FFFFFFFFs on the command line.)
 *
 * Returns 1 if the string matches the device, 0 if it does not and
 * a negative error code if the string cannot be parsed.
 */
static int pci_dev_str_match(struct pci_dev *dev, const char *p,
			     const char **endptr)
{
	int ret;
	int count;
	unsigned short vendor, device, subsystem_vendor, subsystem_device;

	if (strncmp(p, "pci:", 4) == 0) {
		/* PCI vendor/device (subvendor/subdevice) IDs are specified */
		p += 4;
		ret = sscanf(p, "%hx:%hx:%hx:%hx%n", &vendor, &device,
			     &subsystem_vendor, &subsystem_device, &count);
		if (ret != 4) {
			ret = sscanf(p, "%hx:%hx%n", &vendor, &device, &count);
			if (ret != 2)
				return -EINVAL;

			subsystem_vendor = 0;
			subsystem_device = 0;
		}

		p += count;

		if ((!vendor || vendor == dev->vendor) &&
		    (!device || device == dev->device) &&
		    (!subsystem_vendor ||
		     subsystem_vendor == dev->subsystem_vendor) &&
		    (!subsystem_device ||
		     subsystem_device == dev->subsystem_device))
			goto found;
	} else {
		/*
		 * PCI Bus, Device, Function IDs are specified
		 * (optionally, may include a path of devfns following it)
		 */
		ret = pci_dev_str_match_path(dev, p, &p);
		if (ret < 0)
			return ret;
		else if (ret)
			goto found;
	}

	*endptr = p;
	return 0;

found:
	*endptr = p;
	return 1;
}

static u8 __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
				  u8 pos, int cap, int *ttl)
{
	u8 id;
	u16 ent;

	pci_bus_read_config_byte(bus, devfn, pos, &pos);

	while ((*ttl)--) {
		if (pos < 0x40)
			break;
		pos &= ~3;
		pci_bus_read_config_word(bus, devfn, pos, &ent);

		id = ent & 0xff;
		if (id == 0xff)
			break;
		if (id == cap)
			return pos;
		pos = (ent >> 8);
	}
	return 0;
}

static u8 __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn,
			      u8 pos, int cap)
{
	int ttl = PCI_FIND_CAP_TTL;

	return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl);
}

u8 pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
{
	return __pci_find_next_cap(dev->bus, dev->devfn,
				   pos + PCI_CAP_LIST_NEXT, cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_capability);

static u8 __pci_bus_find_cap_start(struct pci_bus *bus,
				   unsigned int devfn, u8 hdr_type)
{
	u16 status;

	pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
	if (!(status & PCI_STATUS_CAP_LIST))
		return 0;

	switch (hdr_type) {
	case PCI_HEADER_TYPE_NORMAL:
	case PCI_HEADER_TYPE_BRIDGE:
		return PCI_CAPABILITY_LIST;
	case PCI_HEADER_TYPE_CARDBUS:
		return PCI_CB_CAPABILITY_LIST;
	}

	return 0;
}

/**
 * pci_find_capability - query for devices' capabilities
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Tell if a device supports a given PCI capability.
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it. Possible values for @cap include:
 *
 *  %PCI_CAP_ID_PM	Power Management
 *  %PCI_CAP_ID_AGP	Accelerated Graphics Port
 *  %PCI_CAP_ID_VPD	Vital Product Data
 *  %PCI_CAP_ID_SLOTID	Slot Identification
 *  %PCI_CAP_ID_MSI	Message Signalled Interrupts
 *  %PCI_CAP_ID_CHSWP	CompactPCI HotSwap
 *  %PCI_CAP_ID_PCIX	PCI-X
 *  %PCI_CAP_ID_EXP	PCI Express
 */
u8 pci_find_capability(struct pci_dev *dev, int cap)
{
	u8 pos;

	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
	if (pos)
		pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap);

	return pos;
}
EXPORT_SYMBOL(pci_find_capability);
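
/*
 * Illustrative call: find the PCI Express Capability, then read one of its
 * registers (pcie_capability_read_word() is the usual shortcut for this):
 *
 *	u16 lnksta;
 *	u8 pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
 *
 *	if (pos)
 *		pci_read_config_word(dev, pos + PCI_EXP_LNKSTA, &lnksta);
 */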

/**
 * pci_bus_find_capability - query for devices' capabilities
 * @bus: the PCI bus to query
 * @devfn: PCI device to query
 * @cap: capability code
 *
 * Like pci_find_capability() but works for PCI devices that do not have a
 * pci_dev structure set up yet.
 *
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.
 */
u8 pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
{
	u8 hdr_type, pos;

	pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);

	pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & PCI_HEADER_TYPE_MASK);
	if (pos)
		pos = __pci_find_next_cap(bus, devfn, pos, cap);

	return pos;
}
EXPORT_SYMBOL(pci_bus_find_capability);

/**
 * pci_find_next_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @start: address at which to start looking (0 to start at beginning of list)
 * @cap: capability code
 *
 * Returns the address of the next matching extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it. Some capabilities can occur several times, e.g., the
 * vendor-specific capability, and this provides a way to find them all.
 */
u16 pci_find_next_ext_capability(struct pci_dev *dev, u16 start, int cap)
{
	u32 header;
	int ttl;
	u16 pos = PCI_CFG_SPACE_SIZE;

	/* minimum 8 bytes per capability */
	ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;

	if (dev->cfg_size <= PCI_CFG_SPACE_SIZE)
		return 0;

	if (start)
		pos = start;

	if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
		return 0;

	/*
	 * If we have no capabilities, this is indicated by cap ID,
	 * cap version and next pointer all being 0.
	 */
	if (header == 0)
		return 0;

	while (ttl-- > 0) {
		if (PCI_EXT_CAP_ID(header) == cap && pos != start)
			return pos;

		pos = PCI_EXT_CAP_NEXT(header);
		if (pos < PCI_CFG_SPACE_SIZE)
			break;

		if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
			break;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(pci_find_next_ext_capability);
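
/*
 * Since some extended capabilities (e.g. vendor-specific ones) may appear
 * more than once, every instance can be visited with a loop like this
 * sketch; pci_find_vsec_capability() below uses the same pattern:
 *
 *	u16 pos = 0;
 *
 *	while ((pos = pci_find_next_ext_capability(dev, pos,
 *						   PCI_EXT_CAP_ID_VNDR)))
 *		...;	// inspect the instance at 'pos'
 */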

/**
 * pci_find_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Returns the address of the requested extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it. Possible values for @cap include:
 *
 *  %PCI_EXT_CAP_ID_ERR	Advanced Error Reporting
 *  %PCI_EXT_CAP_ID_VC	Virtual Channel
 *  %PCI_EXT_CAP_ID_DSN	Device Serial Number
 *  %PCI_EXT_CAP_ID_PWR	Power Budgeting
 */
u16 pci_find_ext_capability(struct pci_dev *dev, int cap)
{
	return pci_find_next_ext_capability(dev, 0, cap);
}
EXPORT_SYMBOL_GPL(pci_find_ext_capability);

/**
 * pci_get_dsn - Read and return the 8-byte Device Serial Number
 * @dev: PCI device to query
 *
 * Looks up the PCI_EXT_CAP_ID_DSN and reads the 8 bytes of the Device Serial
 * Number.
 *
 * Returns the DSN, or zero if the capability does not exist.
 */
u64 pci_get_dsn(struct pci_dev *dev)
{
	u32 dword;
	u64 dsn;
	int pos;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DSN);
	if (!pos)
		return 0;

	/*
	 * The Device Serial Number is two dwords offset 4 bytes from the
	 * capability position. The specification says that the first dword is
	 * the lower half, and the second dword is the upper half.
	 */
	pos += 4;
	pci_read_config_dword(dev, pos, &dword);
	dsn = (u64)dword;
	pci_read_config_dword(dev, pos + 4, &dword);
	dsn |= ((u64)dword) << 32;

	return dsn;
}
EXPORT_SYMBOL_GPL(pci_get_dsn);

static u8 __pci_find_next_ht_cap(struct pci_dev *dev, u8 pos, int ht_cap)
{
	int rc, ttl = PCI_FIND_CAP_TTL;
	u8 cap, mask;

	if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST)
		mask = HT_3BIT_CAP_MASK;
	else
		mask = HT_5BIT_CAP_MASK;

	pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos,
				      PCI_CAP_ID_HT, &ttl);
	while (pos) {
		rc = pci_read_config_byte(dev, pos + 3, &cap);
		if (rc != PCIBIOS_SUCCESSFUL)
			return 0;

		if ((cap & mask) == ht_cap)
			return pos;

		pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn,
					      pos + PCI_CAP_LIST_NEXT,
					      PCI_CAP_ID_HT, &ttl);
	}

	return 0;
}

/**
 * pci_find_next_ht_capability - query a device's HyperTransport capabilities
 * @dev: PCI device to query
 * @pos: Position from which to continue searching
 * @ht_cap: HyperTransport capability code
 *
 * To be used in conjunction with pci_find_ht_capability() to search for
 * all capabilities matching @ht_cap. @pos should always be a value returned
 * from pci_find_ht_capability().
 *
 * NB. To be 100% safe against broken PCI devices, the caller should take
 * steps to avoid an infinite loop.
 */
u8 pci_find_next_ht_capability(struct pci_dev *dev, u8 pos, int ht_cap)
{
	return __pci_find_next_ht_cap(dev, pos + PCI_CAP_LIST_NEXT, ht_cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_ht_capability);

/**
 * pci_find_ht_capability - query a device's HyperTransport capabilities
 * @dev: PCI device to query
 * @ht_cap: HyperTransport capability code
 *
 * Tell if a device supports a given HyperTransport capability.
 * Returns an address within the device's PCI configuration space
 * or 0 in case the device does not support the requested capability.
 * The address points to the PCI capability, of type PCI_CAP_ID_HT,
 * which has a HyperTransport capability matching @ht_cap.
 */
u8 pci_find_ht_capability(struct pci_dev *dev, int ht_cap)
{
	u8 pos;

	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
	if (pos)
		pos = __pci_find_next_ht_cap(dev, pos, ht_cap);

	return pos;
}
EXPORT_SYMBOL_GPL(pci_find_ht_capability);

/**
 * pci_find_vsec_capability - Find a vendor-specific extended capability
 * @dev: PCI device to query
 * @vendor: Vendor ID for which capability is defined
 * @cap: Vendor-specific capability ID
 *
 * If @dev has Vendor ID @vendor, search for a VSEC capability with
 * VSEC ID @cap. If found, return the capability offset in
 * config space; otherwise return 0.
 */
u16 pci_find_vsec_capability(struct pci_dev *dev, u16 vendor, int cap)
{
	u16 vsec = 0;
	u32 header;
	int ret;

	if (vendor != dev->vendor)
		return 0;

	while ((vsec = pci_find_next_ext_capability(dev, vsec,
						    PCI_EXT_CAP_ID_VNDR))) {
		ret = pci_read_config_dword(dev, vsec + PCI_VNDR_HEADER, &header);
		if (ret != PCIBIOS_SUCCESSFUL)
			continue;

		if (PCI_VNDR_HEADER_ID(header) == cap)
			return vsec;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(pci_find_vsec_capability);

/**
 * pci_find_dvsec_capability - Find DVSEC for vendor
 * @dev: PCI device to query
 * @vendor: Vendor ID to match for the DVSEC
 * @dvsec: Designated Vendor-specific capability ID
 *
 * If DVSEC has Vendor ID @vendor and DVSEC ID @dvsec return the capability
 * offset in config space; otherwise return 0.
 */
u16 pci_find_dvsec_capability(struct pci_dev *dev, u16 vendor, u16 dvsec)
{
	int pos;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DVSEC);
	if (!pos)
		return 0;

	while (pos) {
		u16 v, id;

		pci_read_config_word(dev, pos + PCI_DVSEC_HEADER1, &v);
		pci_read_config_word(dev, pos + PCI_DVSEC_HEADER2, &id);
		if (vendor == v && dvsec == id)
			return pos;

		pos = pci_find_next_ext_capability(dev, pos, PCI_EXT_CAP_ID_DVSEC);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(pci_find_dvsec_capability);
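
/*
 * Illustrative call (the vendor ID and DVSEC ID below are placeholders,
 * not values defined in this file):
 *
 *	u16 pos = pci_find_dvsec_capability(pdev, 0x8086, 0x23);
 *
 *	if (!pos)
 *		return -ENODEV;
 */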

/**
 * pci_find_parent_resource - return resource region of parent bus of given
 *			      region
 * @dev: PCI device structure contains resources to be searched
 * @res: child resource record for which parent is sought
 *
 * For given resource region of given device, return the resource region of
 * parent bus the given region is contained in.
 */
struct resource *pci_find_parent_resource(const struct pci_dev *dev,
					  struct resource *res)
{
	const struct pci_bus *bus = dev->bus;
	struct resource *r;

	pci_bus_for_each_resource(bus, r) {
		if (!r)
			continue;
		if (resource_contains(r, res)) {

			/*
			 * If the window is prefetchable but the BAR is
			 * not, the allocator made a mistake.
			 */
			if (r->flags & IORESOURCE_PREFETCH &&
			    !(res->flags & IORESOURCE_PREFETCH))
				return NULL;

			/*
			 * If we're below a transparent bridge, there may
			 * be both a positively-decoded aperture and a
			 * subtractively-decoded region that contain the BAR.
			 * We want the positively-decoded one, so this depends
			 * on pci_bus_for_each_resource() giving us those
			 * first.
			 */
			return r;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(pci_find_parent_resource);

/**
 * pci_find_resource - Return matching PCI device resource
 * @dev: PCI device to query
 * @res: Resource to look for
 *
 * Goes over standard PCI resources (BARs) and checks if the given resource
 * is partially or fully contained in any of them. In that case the
 * matching resource is returned, %NULL otherwise.
 */
struct resource *pci_find_resource(struct pci_dev *dev, struct resource *res)
{
	int i;

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		struct resource *r = &dev->resource[i];

		if (r->start && resource_contains(r, res))
			return r;
	}

	return NULL;
}
EXPORT_SYMBOL(pci_find_resource);

/**
 * pci_resource_name - Return the name of the PCI resource
 * @dev: PCI device to query
 * @i: index of the resource
 *
 * Return the standard PCI resource (BAR) name according to its index.
 */
const char *pci_resource_name(struct pci_dev *dev, unsigned int i)
{
	static const char * const bar_name[] = {
		"BAR 0",
		"BAR 1",
		"BAR 2",
		"BAR 3",
		"BAR 4",
		"BAR 5",
		"ROM",
#ifdef CONFIG_PCI_IOV
		"VF BAR 0",
		"VF BAR 1",
		"VF BAR 2",
		"VF BAR 3",
		"VF BAR 4",
		"VF BAR 5",
#endif
		"bridge window",	/* "io" included in %pR */
		"bridge window",	/* "mem" included in %pR */
		"bridge window",	/* "mem pref" included in %pR */
	};
	static const char * const cardbus_name[] = {
		"BAR 1",
		"unknown",
		"unknown",
		"unknown",
		"unknown",
		"unknown",
#ifdef CONFIG_PCI_IOV
		"unknown",
		"unknown",
		"unknown",
		"unknown",
		"unknown",
		"unknown",
#endif
		"CardBus bridge window 0",	/* I/O */
		"CardBus bridge window 1",	/* I/O */
		"CardBus bridge window 0",	/* mem */
		"CardBus bridge window 1",	/* mem */
	};

	if (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS &&
	    i < ARRAY_SIZE(cardbus_name))
		return cardbus_name[i];

	if (i < ARRAY_SIZE(bar_name))
		return bar_name[i];

	return "unknown";
}

/**
 * pci_wait_for_pending - wait for @mask bit(s) to clear in status word @pos
 * @dev: the PCI device to operate on
 * @pos: config space offset of status word
 * @mask: mask of bit(s) to care about in status word
 *
 * Return 1 when mask bit(s) in status word clear, 0 otherwise.
 */
int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask)
{
	int i;

	/* Wait for the Transaction Pending bit to clear */
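	/* Poll up to four times, backing off 100, 200, then 400 ms (~700 ms total) */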
	for (i = 0; i < 4; i++) {
		u16 status;
		if (i)
			msleep((1 << (i - 1)) * 100);

		pci_read_config_word(dev, pos, &status);
		if (!(status & mask))
			return 1;
	}

	return 0;
}

static int pci_acs_enable;

/**
 * pci_request_acs - ask for ACS to be enabled if supported
 */
void pci_request_acs(void)
{
	pci_acs_enable = 1;
}

static const char *disable_acs_redir_param;
static const char *config_acs_param;

struct pci_acs {
	u16 cap;
	u16 ctrl;
	u16 fw_ctrl;
};

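/*
 * As parsed by __pci_config_acs() below, the config_acs= parameter is a
 * string of ACS control flag characters ('0', '1', or 'x'/'X' = don't
 * touch), an '@', and a device string in a format accepted by
 * pci_dev_str_match(); the rightmost flag character maps to the lowest
 * ACS control bit. Illustrative example: pci=config_acs=10x@0000:00:1f.2
 */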
static void __pci_config_acs(struct pci_dev *dev, struct pci_acs *caps,
			     const char *p, u16 mask, u16 flags)
{
	char *delimit;
	int ret = 0;

	if (!p)
		return;

	while (*p) {
		if (!mask) {
			/* Check for ACS flags */
			delimit = strstr(p, "@");
			if (delimit) {
				int end;
				u32 shift = 0;

				end = delimit - p - 1;

				while (end > -1) {
					if (*(p + end) == '0') {
						mask |= 1 << shift;
						shift++;
						end--;
					} else if (*(p + end) == '1') {
						mask |= 1 << shift;
						flags |= 1 << shift;
						shift++;
						end--;
					} else if ((*(p + end) == 'x') || (*(p + end) == 'X')) {
						shift++;
						end--;
					} else {
						pci_err(dev, "Invalid ACS flags... Ignoring\n");
						return;
					}
				}
				p = delimit + 1;
			} else {
				pci_err(dev, "ACS Flags missing\n");
				return;
			}
		}

		if (mask & ~(PCI_ACS_SV | PCI_ACS_TB | PCI_ACS_RR | PCI_ACS_CR |
			     PCI_ACS_UF | PCI_ACS_EC | PCI_ACS_DT)) {
			pci_err(dev, "Invalid ACS flags specified\n");
			return;
		}

		ret = pci_dev_str_match(dev, p, &p);
		if (ret < 0) {
			pr_info_once("PCI: Can't parse ACS command line parameter\n");
			break;
		} else if (ret == 1) {
			/* Found a match */
			break;
		}

		if (*p != ';' && *p != ',') {
			/* End of param or invalid format */
			break;
		}
		p++;
	}

	if (ret != 1)
		return;

	if (!pci_dev_specific_disable_acs_redir(dev))
		return;

	pci_dbg(dev, "ACS mask = %#06x\n", mask);
	pci_dbg(dev, "ACS flags = %#06x\n", flags);

	/* If mask is 0 then we copy the bit from the firmware setting. */
	caps->ctrl = (caps->ctrl & ~mask) | (caps->fw_ctrl & mask);
	caps->ctrl |= flags;

	pci_info(dev, "Configured ACS to %#06x\n", caps->ctrl);
}

/**
 * pci_std_enable_acs - enable ACS on devices using standard ACS capabilities
 * @dev: the PCI device
 * @caps: default ACS controls
 */
static void pci_std_enable_acs(struct pci_dev *dev, struct pci_acs *caps)
{
	/* Source Validation */
	caps->ctrl |= (caps->cap & PCI_ACS_SV);

	/* P2P Request Redirect */
	caps->ctrl |= (caps->cap & PCI_ACS_RR);

	/* P2P Completion Redirect */
	caps->ctrl |= (caps->cap & PCI_ACS_CR);

	/* Upstream Forwarding */
	caps->ctrl |= (caps->cap & PCI_ACS_UF);

	/* Enable Translation Blocking for external devices and noats */
	if (pci_ats_disabled() || dev->external_facing || dev->untrusted)
		caps->ctrl |= (caps->cap & PCI_ACS_TB);
}

/**
 * pci_enable_acs - enable ACS if hardware supports it
 * @dev: the PCI device
 */
static void pci_enable_acs(struct pci_dev *dev)
{
	struct pci_acs caps;
	int pos;

	pos = dev->acs_cap;
	if (!pos)
		return;

	pci_read_config_word(dev, pos + PCI_ACS_CAP, &caps.cap);
	pci_read_config_word(dev, pos + PCI_ACS_CTRL, &caps.ctrl);
	caps.fw_ctrl = caps.ctrl;

	/* If an iommu is present we start with kernel default caps */
	if (pci_acs_enable) {
		if (pci_dev_specific_enable_acs(dev))
			pci_std_enable_acs(dev, &caps);
	}

	/*
	 * Always apply caps from the command line, even if there is no iommu.
	 * Trust that the admin has a reason to change the ACS settings.
	 */
	__pci_config_acs(dev, &caps, disable_acs_redir_param,
			 PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC,
			 ~(PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC));
	__pci_config_acs(dev, &caps, config_acs_param, 0, 0);

	pci_write_config_word(dev, pos + PCI_ACS_CTRL, caps.ctrl);
}

/**
 * pcie_read_tlp_log - read TLP Header Log
 * @dev: PCIe device
 * @where: PCI Config offset of TLP Header Log
 * @tlp_log: TLP Log structure to fill
 *
 * Fill @tlp_log from TLP Header Log registers, e.g., AER or DPC.
 *
 * Return: 0 on success and filled TLP Log structure, <0 on error.
 */
int pcie_read_tlp_log(struct pci_dev *dev, int where,
		      struct pcie_tlp_log *tlp_log)
{
	int i, ret;

	memset(tlp_log, 0, sizeof(*tlp_log));

	for (i = 0; i < 4; i++) {
		ret = pci_read_config_dword(dev, where + i * 4,
					    &tlp_log->dw[i]);
		if (ret)
			return pcibios_err_to_errno(ret);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(pcie_read_tlp_log);

/**
 * pci_restore_bars - restore a device's BAR values (e.g. after wake-up)
 * @dev: PCI device to have its BARs restored
 *
 * Restore the BAR values for a given device, so as to make it
 * accessible by its driver.
 */
static void pci_restore_bars(struct pci_dev *dev)
{
	int i;

	for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
		pci_update_resource(dev, i);
}

static inline bool platform_pci_power_manageable(struct pci_dev *dev)
{
	if (pci_use_mid_pm())
		return true;

	return acpi_pci_power_manageable(dev);
}

static inline int platform_pci_set_power_state(struct pci_dev *dev,
					       pci_power_t t)
{
	if (pci_use_mid_pm())
		return mid_pci_set_power_state(dev, t);

	return acpi_pci_set_power_state(dev, t);
}

static inline pci_power_t platform_pci_get_power_state(struct pci_dev *dev)
{
	if (pci_use_mid_pm())
		return mid_pci_get_power_state(dev);

	return acpi_pci_get_power_state(dev);
}

static inline void platform_pci_refresh_power_state(struct pci_dev *dev)
{
	if (!pci_use_mid_pm())
		acpi_pci_refresh_power_state(dev);
}

static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev)
{
	if (pci_use_mid_pm())
		return PCI_POWER_ERROR;

	return acpi_pci_choose_state(dev);
}

static inline int platform_pci_set_wakeup(struct pci_dev *dev, bool enable)
{
	if (pci_use_mid_pm())
		return PCI_POWER_ERROR;

	return acpi_pci_wakeup(dev, enable);
}

static inline bool platform_pci_need_resume(struct pci_dev *dev)
{
	if (pci_use_mid_pm())
		return false;

	return acpi_pci_need_resume(dev);
}

static inline bool platform_pci_bridge_d3(struct pci_dev *dev)
{
	if (pci_use_mid_pm())
		return false;

	return acpi_pci_bridge_d3(dev);
}

/**
 * pci_update_current_state - Read power state of given device and cache it
 * @dev: PCI device to handle.
 * @state: State to cache in case the device doesn't have the PM capability
 *
 * The power state is read from the PMCSR register, which however is
 * inaccessible in D3cold. The platform firmware is therefore queried first
 * to detect accessibility of the register. In case the platform firmware
 * reports an incorrect state or the device isn't power manageable by the
 * platform at all, we try to detect D3cold by testing accessibility of the
 * vendor ID in config space.
 */
void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
{
	if (platform_pci_get_power_state(dev) == PCI_D3cold) {
		dev->current_state = PCI_D3cold;
	} else if (dev->pm_cap) {
		u16 pmcsr;

		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
		if (PCI_POSSIBLE_ERROR(pmcsr)) {
			dev->current_state = PCI_D3cold;
			return;
		}
		dev->current_state = pmcsr & PCI_PM_CTRL_STATE_MASK;
	} else {
		dev->current_state = state;
	}
}

/**
 * pci_refresh_power_state - Refresh the given device's power state data
 * @dev: Target PCI device.
 *
 * Ask the platform to refresh the device's power state information and invoke
 * pci_update_current_state() to update its current PCI power state.
 */
void pci_refresh_power_state(struct pci_dev *dev)
{
	platform_pci_refresh_power_state(dev);
	pci_update_current_state(dev, dev->current_state);
}

/**
 * pci_platform_power_transition - Use platform to change device power state
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 */
int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
{
	int error;

	error = platform_pci_set_power_state(dev, state);
	if (!error)
		pci_update_current_state(dev, state);
	else if (!dev->pm_cap) /* Fall back to PCI_D0 */
		dev->current_state = PCI_D0;

	return error;
}
EXPORT_SYMBOL_GPL(pci_platform_power_transition);

static int pci_resume_one(struct pci_dev *pci_dev, void *ign)
{
	pm_request_resume(&pci_dev->dev);
	return 0;
}

/**
 * pci_resume_bus - Walk given bus and runtime resume devices on it
 * @bus: Top bus of the subtree to walk.
 */
void pci_resume_bus(struct pci_bus *bus)
{
	if (bus)
		pci_walk_bus(bus, pci_resume_one, NULL);
}

static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout)
{
	int delay = 1;
	bool retrain = false;
	struct pci_dev *root, *bridge;

	root = pcie_find_root_port(dev);

	if (pci_is_pcie(dev)) {
		bridge = pci_upstream_bridge(dev);
		if (bridge)
			retrain = true;
	}

	/*
	 * The caller has already waited long enough after a reset that the
	 * device should respond to config requests, but it may respond
	 * with Request Retry Status (RRS) if it needs more time to
	 * initialize.
	 *
	 * If the device is below a Root Port with Configuration RRS
	 * Software Visibility enabled, reading the Vendor ID returns a
	 * special data value if the device responded with RRS. Read the
	 * Vendor ID until we get non-RRS status.
	 *
	 * If there's no Root Port or Configuration RRS Software Visibility
	 * is not enabled, the device may still respond with RRS, but
	 * hardware may retry the config request. If no retries receive
	 * Successful Completion, hardware generally synthesizes ~0
	 * (PCI_ERROR_RESPONSE) data to complete the read. Reading Vendor
	 * ID for VFs and non-existent devices also returns ~0, so read the
	 * Command register until it returns something other than ~0.
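	 *
	 * The loop below polls with exponential backoff (1 ms, 2 ms,
	 * 4 ms, ...), so the total time spent sleeping stays within
	 * roughly twice @timeout.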
1314 */
1315 for (;;) {
1316 u32 id;
1317
1318 if (pci_dev_is_disconnected(dev)) {
1319 pci_dbg(dev, "disconnected; not waiting\n");
1320 return -ENOTTY;
1321 }
1322
1323 if (root && root->config_rrs_sv) {
1324 pci_read_config_dword(dev, PCI_VENDOR_ID, &id);
1325 if (!pci_bus_rrs_vendor_id(id))
1326 break;
1327 } else {
1328 pci_read_config_dword(dev, PCI_COMMAND, &id);
1329 if (!PCI_POSSIBLE_ERROR(id))
1330 break;
1331 }
1332
1333 if (delay > timeout) {
1334 pci_warn(dev, "not ready %dms after %s; giving up\n",
1335 delay - 1, reset_type);
1336 return -ENOTTY;
1337 }
1338
1339 if (delay > PCI_RESET_WAIT) {
1340 if (retrain) {
1341 retrain = false;
1342 if (pcie_failed_link_retrain(bridge) == 0) {
1343 delay = 1;
1344 continue;
1345 }
1346 }
1347 pci_info(dev, "not ready %dms after %s; waiting\n",
1348 delay - 1, reset_type);
1349 }
1350
1351 msleep(delay);
1352 delay *= 2;
1353 }
1354
1355 if (delay > PCI_RESET_WAIT)
1356 pci_info(dev, "ready %dms after %s\n", delay - 1,
1357 reset_type);
1358 else
1359 pci_dbg(dev, "ready %dms after %s\n", delay - 1,
1360 reset_type);
1361
1362 return 0;
1363 }
1364
1365 /**
1366 * pci_power_up - Put the given device into D0
1367 * @dev: PCI device to power up
1368 *
1369 * On success, return 0 or 1, depending on whether or not it is necessary to
1370 * restore the device's BARs subsequently (1 is returned in that case).
1371 *
1372 * On failure, return a negative error code. Always return failure if @dev
1373 * lacks a Power Management Capability, even if the platform was able to
1374 * put the device in D0 via non-PCI means.
1375 */
pci_power_up(struct pci_dev * dev)1376 int pci_power_up(struct pci_dev *dev)
1377 {
1378 bool need_restore;
1379 pci_power_t state;
1380 u16 pmcsr;
1381
1382 platform_pci_set_power_state(dev, PCI_D0);
1383
1384 if (!dev->pm_cap) {
1385 state = platform_pci_get_power_state(dev);
1386 if (state == PCI_UNKNOWN)
1387 dev->current_state = PCI_D0;
1388 else
1389 dev->current_state = state;
1390
1391 return -EIO;
1392 }
1393
1394 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
1395 if (PCI_POSSIBLE_ERROR(pmcsr)) {
1396 pci_err(dev, "Unable to change power state from %s to D0, device inaccessible\n",
1397 pci_power_name(dev->current_state));
1398 dev->current_state = PCI_D3cold;
1399 return -EIO;
1400 }
1401
1402 state = pmcsr & PCI_PM_CTRL_STATE_MASK;
1403
1404 need_restore = (state == PCI_D3hot || dev->current_state >= PCI_D3hot) &&
1405 !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET);
1406
1407 if (state == PCI_D0)
1408 goto end;
1409
1410 /*
1411 * Force the entire word to 0. This doesn't affect PME_Status, disables
1412 * PME_En, and sets PowerState to 0.
1413 */
1414 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, 0);
1415
1416 /* Mandatory transition delays; see PCI PM 1.2. */
1417 if (state == PCI_D3hot)
1418 pci_dev_d3_sleep(dev);
1419 else if (state == PCI_D2)
1420 udelay(PCI_PM_D2_DELAY);
1421
1422 end:
1423 dev->current_state = PCI_D0;
1424 if (need_restore)
1425 return 1;
1426
1427 return 0;
1428 }
1429
1430 /**
1431 * pci_set_full_power_state - Put a PCI device into D0 and update its state
1432 * @dev: PCI device to power up
1433 * @locked: whether pci_bus_sem is held
1434 *
1435 * Call pci_power_up() to put @dev into D0, read from its PCI_PM_CTRL register
1436 * to confirm the state change, restore its BARs if they might be lost and
1437 * reconfigure ASPM in accordance with the new power state.
1438 *
1439 * If pci_restore_state() is going to be called right after a power state change
1440 * to D0, it is more efficient to use pci_power_up() directly instead of this
1441 * function.
1442 */
pci_set_full_power_state(struct pci_dev * dev,bool locked)1443 static int pci_set_full_power_state(struct pci_dev *dev, bool locked)
1444 {
1445 u16 pmcsr;
1446 int ret;
1447
1448 ret = pci_power_up(dev);
1449 if (ret < 0) {
1450 if (dev->current_state == PCI_D0)
1451 return 0;
1452
1453 return ret;
1454 }
1455
1456 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
1457 dev->current_state = pmcsr & PCI_PM_CTRL_STATE_MASK;
1458 if (dev->current_state != PCI_D0) {
1459 pci_info_ratelimited(dev, "Refused to change power state from %s to D0\n",
1460 pci_power_name(dev->current_state));
1461 } else if (ret > 0) {
1462 /*
1463 * According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
1464 * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
1465 * from D3hot to D0 _may_ perform an internal reset, thereby
1466 * going to "D0 Uninitialized" rather than "D0 Initialized".
1467 * For example, at least some versions of the 3c905B and the
1468 * 3c556B exhibit this behaviour.
1469 *
1470 * At least some laptop BIOSen (e.g. the Thinkpad T21) leave
1471 * devices in a D3hot state at boot. Consequently, we need to
1472 * restore at least the BARs so that the device will be
1473 * accessible to its driver.
1474 */
1475 pci_restore_bars(dev);
1476 }
1477
1478 if (dev->bus->self)
1479 pcie_aspm_pm_state_change(dev->bus->self, locked);
1480
1481 return 0;
1482 }
1483
1484 /**
1485 * __pci_dev_set_current_state - Set current state of a PCI device
1486 * @dev: Device to handle
1487 * @data: pointer to state to be set
1488 */
__pci_dev_set_current_state(struct pci_dev * dev,void * data)1489 static int __pci_dev_set_current_state(struct pci_dev *dev, void *data)
1490 {
1491 pci_power_t state = *(pci_power_t *)data;
1492
1493 dev->current_state = state;
1494 return 0;
1495 }
1496
1497 /**
1498 * pci_bus_set_current_state - Walk given bus and set current state of devices
1499 * @bus: Top bus of the subtree to walk.
1500 * @state: state to be set
1501 */
pci_bus_set_current_state(struct pci_bus * bus,pci_power_t state)1502 void pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state)
1503 {
1504 if (bus)
1505 pci_walk_bus(bus, __pci_dev_set_current_state, &state);
1506 }
1507
__pci_bus_set_current_state(struct pci_bus * bus,pci_power_t state,bool locked)1508 static void __pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state, bool locked)
1509 {
1510 if (!bus)
1511 return;
1512
1513 if (locked)
1514 pci_walk_bus_locked(bus, __pci_dev_set_current_state, &state);
1515 else
1516 pci_walk_bus(bus, __pci_dev_set_current_state, &state);
1517 }
1518
1519 /**
1520 * pci_set_low_power_state - Put a PCI device into a low-power state.
1521 * @dev: PCI device to handle.
1522 * @state: PCI power state (D1, D2, D3hot) to put the device into.
1523 * @locked: whether pci_bus_sem is held
1524 *
1525 * Use the device's PCI_PM_CTRL register to put it into a low-power state.
1526 *
1527 * RETURN VALUE:
1528 * -EINVAL if the requested state is invalid.
1529 * -EIO if device does not support PCI PM or its PM capabilities register has a
1530 * wrong version, or device doesn't support the requested state.
1531 * 0 if device already is in the requested state.
1532 * 0 if device's power state has been successfully changed.
1533 */
pci_set_low_power_state(struct pci_dev * dev,pci_power_t state,bool locked)1534 static int pci_set_low_power_state(struct pci_dev *dev, pci_power_t state, bool locked)
1535 {
1536 u16 pmcsr;
1537
1538 if (!dev->pm_cap)
1539 return -EIO;
1540
1541 /*
1542 * Validate transition: We can enter D0 from any state, but if
1543 * we're already in a low-power state, we can only go deeper. E.g.,
1544 * we can go from D1 to D3, but we can't go directly from D3 to D1;
1545 * we'd have to go from D3 to D0, then to D1.
1546 */
1547 if (dev->current_state <= PCI_D3cold && dev->current_state > state) {
1548 pci_dbg(dev, "Invalid power transition (from %s to %s)\n",
1549 pci_power_name(dev->current_state),
1550 pci_power_name(state));
1551 return -EINVAL;
1552 }
1553
1554 /* Check if this device supports the desired state */
1555 if ((state == PCI_D1 && !dev->d1_support)
1556 || (state == PCI_D2 && !dev->d2_support))
1557 return -EIO;
1558
1559 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
1560 if (PCI_POSSIBLE_ERROR(pmcsr)) {
1561 pci_err(dev, "Unable to change power state from %s to %s, device inaccessible\n",
1562 pci_power_name(dev->current_state),
1563 pci_power_name(state));
1564 dev->current_state = PCI_D3cold;
1565 return -EIO;
1566 }
1567
1568 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
1569 pmcsr |= state;
1570
1571 /* Enter specified state */
1572 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
1573
1574 /* Mandatory power management transition delays; see PCI PM 1.2. */
1575 if (state == PCI_D3hot)
1576 pci_dev_d3_sleep(dev);
1577 else if (state == PCI_D2)
1578 udelay(PCI_PM_D2_DELAY);
1579
1580 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
1581 dev->current_state = pmcsr & PCI_PM_CTRL_STATE_MASK;
1582 if (dev->current_state != state)
1583 pci_info_ratelimited(dev, "Refused to change power state from %s to %s\n",
1584 pci_power_name(dev->current_state),
1585 pci_power_name(state));
1586
1587 if (dev->bus->self)
1588 pcie_aspm_pm_state_change(dev->bus->self, locked);
1589
1590 return 0;
1591 }
1592
__pci_set_power_state(struct pci_dev * dev,pci_power_t state,bool locked)1593 static int __pci_set_power_state(struct pci_dev *dev, pci_power_t state, bool locked)
1594 {
1595 int error;
1596
1597 /* Bound the state we're entering */
1598 if (state > PCI_D3cold)
1599 state = PCI_D3cold;
1600 else if (state < PCI_D0)
1601 state = PCI_D0;
1602 else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))
1603
1604 /*
1605 * If the device or the parent bridge do not support PCI
1606 * PM, ignore the request if we're doing anything other
1607 * than putting it into D0 (which would only happen on
1608 * boot).
1609 */
1610 return 0;
1611
1612 /* Check if we're already there */
1613 if (dev->current_state == state)
1614 return 0;
1615
1616 if (state == PCI_D0)
1617 return pci_set_full_power_state(dev, locked);
1618
1619 /*
1620 * This device is quirked not to be put into D3, so don't put it in
1621 * D3
1622 */
1623 if (state >= PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
1624 return 0;
1625
1626 if (state == PCI_D3cold) {
1627 /*
1628 * To put the device in D3cold, put it into D3hot in the native
1629 * way, then put it into D3cold using platform ops.
1630 */
1631 error = pci_set_low_power_state(dev, PCI_D3hot, locked);
1632
1633 if (pci_platform_power_transition(dev, PCI_D3cold))
1634 return error;
1635
1636 /* Powering off a bridge may power off the whole hierarchy */
1637 if (dev->current_state == PCI_D3cold)
1638 __pci_bus_set_current_state(dev->subordinate, PCI_D3cold, locked);
1639 } else {
1640 error = pci_set_low_power_state(dev, state, locked);
1641
1642 if (pci_platform_power_transition(dev, state))
1643 return error;
1644 }
1645
1646 return 0;
1647 }
1648
1649 /**
1650 * pci_set_power_state - Set the power state of a PCI device
1651 * @dev: PCI device to handle.
1652 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
1653 *
1654 * Transition a device to a new power state, using the platform firmware and/or
1655 * the device's PCI PM registers.
1656 *
1657 * RETURN VALUE:
1658 * -EINVAL if the requested state is invalid.
1659 * -EIO if device does not support PCI PM or its PM capabilities register has a
1660 * wrong version, or device doesn't support the requested state.
1661 * 0 if the transition is to D1 or D2 but D1 and D2 are not supported.
1662 * 0 if device already is in the requested state.
1663 * 0 if the transition is to D3 but D3 is not supported.
1664 * 0 if device's power state has been successfully changed.
1665 */
pci_set_power_state(struct pci_dev * dev,pci_power_t state)1666 int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
1667 {
1668 return __pci_set_power_state(dev, state, false);
1669 }
1670 EXPORT_SYMBOL(pci_set_power_state);
1671
pci_set_power_state_locked(struct pci_dev * dev,pci_power_t state)1672 int pci_set_power_state_locked(struct pci_dev *dev, pci_power_t state)
1673 {
1674 lockdep_assert_held(&pci_bus_sem);
1675
1676 return __pci_set_power_state(dev, state, true);
1677 }
1678 EXPORT_SYMBOL(pci_set_power_state_locked);
1679
1680 #define PCI_EXP_SAVE_REGS 7
1681
_pci_find_saved_cap(struct pci_dev * pci_dev,u16 cap,bool extended)1682 static struct pci_cap_saved_state *_pci_find_saved_cap(struct pci_dev *pci_dev,
1683 u16 cap, bool extended)
1684 {
1685 struct pci_cap_saved_state *tmp;
1686
1687 hlist_for_each_entry(tmp, &pci_dev->saved_cap_space, next) {
1688 if (tmp->cap.cap_extended == extended && tmp->cap.cap_nr == cap)
1689 return tmp;
1690 }
1691 return NULL;
1692 }
1693
pci_find_saved_cap(struct pci_dev * dev,char cap)1694 struct pci_cap_saved_state *pci_find_saved_cap(struct pci_dev *dev, char cap)
1695 {
1696 return _pci_find_saved_cap(dev, cap, false);
1697 }
1698
pci_find_saved_ext_cap(struct pci_dev * dev,u16 cap)1699 struct pci_cap_saved_state *pci_find_saved_ext_cap(struct pci_dev *dev, u16 cap)
1700 {
1701 return _pci_find_saved_cap(dev, cap, true);
1702 }
1703
pci_save_pcie_state(struct pci_dev * dev)1704 static int pci_save_pcie_state(struct pci_dev *dev)
1705 {
1706 int i = 0;
1707 struct pci_cap_saved_state *save_state;
1708 u16 *cap;
1709
1710 if (!pci_is_pcie(dev))
1711 return 0;
1712
1713 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
1714 if (!save_state) {
1715 pci_err(dev, "buffer not found in %s\n", __func__);
1716 return -ENOMEM;
1717 }
1718
1719 cap = (u16 *)&save_state->cap.data[0];
1720 pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &cap[i++]);
1721 pcie_capability_read_word(dev, PCI_EXP_LNKCTL, &cap[i++]);
1722 pcie_capability_read_word(dev, PCI_EXP_SLTCTL, &cap[i++]);
1723 pcie_capability_read_word(dev, PCI_EXP_RTCTL, &cap[i++]);
1724 pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &cap[i++]);
1725 pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &cap[i++]);
1726 pcie_capability_read_word(dev, PCI_EXP_SLTCTL2, &cap[i++]);
1727
1728 pci_save_aspm_l1ss_state(dev);
1729 pci_save_ltr_state(dev);
1730
1731 return 0;
1732 }
1733
pci_restore_pcie_state(struct pci_dev * dev)1734 static void pci_restore_pcie_state(struct pci_dev *dev)
1735 {
1736 int i = 0;
1737 struct pci_cap_saved_state *save_state;
1738 u16 *cap;
1739
1740 /*
1741 * Restore max latencies (in the LTR capability) before enabling
1742 * LTR itself in PCI_EXP_DEVCTL2.
1743 */
1744 pci_restore_ltr_state(dev);
1745 pci_restore_aspm_l1ss_state(dev);
1746
1747 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
1748 if (!save_state)
1749 return;
1750
1751 /*
1752 * Downstream ports reset the LTR enable bit when link goes down.
1753 * Check and re-configure the bit here before restoring device.
1754 * PCIe r5.0, sec 7.5.3.16.
1755 */
1756 pci_bridge_reconfigure_ltr(dev);
1757
1758 cap = (u16 *)&save_state->cap.data[0];
1759 pcie_capability_write_word(dev, PCI_EXP_DEVCTL, cap[i++]);
1760 pcie_capability_write_word(dev, PCI_EXP_LNKCTL, cap[i++]);
1761 pcie_capability_write_word(dev, PCI_EXP_SLTCTL, cap[i++]);
1762 pcie_capability_write_word(dev, PCI_EXP_RTCTL, cap[i++]);
1763 pcie_capability_write_word(dev, PCI_EXP_DEVCTL2, cap[i++]);
1764 pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, cap[i++]);
1765 pcie_capability_write_word(dev, PCI_EXP_SLTCTL2, cap[i++]);
1766 }
1767
pci_save_pcix_state(struct pci_dev * dev)1768 static int pci_save_pcix_state(struct pci_dev *dev)
1769 {
1770 int pos;
1771 struct pci_cap_saved_state *save_state;
1772
1773 pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
1774 if (!pos)
1775 return 0;
1776
1777 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
1778 if (!save_state) {
1779 pci_err(dev, "buffer not found in %s\n", __func__);
1780 return -ENOMEM;
1781 }
1782
1783 pci_read_config_word(dev, pos + PCI_X_CMD,
1784 (u16 *)save_state->cap.data);
1785
1786 return 0;
1787 }
1788
pci_restore_pcix_state(struct pci_dev * dev)1789 static void pci_restore_pcix_state(struct pci_dev *dev)
1790 {
1791 int i = 0, pos;
1792 struct pci_cap_saved_state *save_state;
1793 u16 *cap;
1794
1795 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
1796 pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
1797 if (!save_state || !pos)
1798 return;
1799 cap = (u16 *)&save_state->cap.data[0];
1800
1801 pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
1802 }
1803
1804 /**
1805 * pci_save_state - save the PCI configuration space of a device before
1806 * suspending
1807 * @dev: PCI device that we're dealing with
1808 */
1809 int pci_save_state(struct pci_dev *dev)
1810 {
1811 int i;
1812 /* XXX: 100% dword access ok here? */
1813 for (i = 0; i < 16; i++) {
1814 pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
1815 pci_dbg(dev, "save config %#04x: %#010x\n",
1816 i * 4, dev->saved_config_space[i]);
1817 }
1818 dev->state_saved = true;
1819
1820 i = pci_save_pcie_state(dev);
1821 if (i != 0)
1822 return i;
1823
1824 i = pci_save_pcix_state(dev);
1825 if (i != 0)
1826 return i;
1827
1828 pci_save_dpc_state(dev);
1829 pci_save_aer_state(dev);
1830 pci_save_ptm_state(dev);
1831 return pci_save_vc_state(dev);
1832 }
1833 EXPORT_SYMBOL(pci_save_state);
1834
1835 static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
1836 u32 saved_val, int retry, bool force)
1837 {
1838 u32 val;
1839
1840 pci_read_config_dword(pdev, offset, &val);
1841 if (!force && val == saved_val)
1842 return;
1843
1844 for (;;) {
1845 pci_dbg(pdev, "restore config %#04x: %#010x -> %#010x\n",
1846 offset, val, saved_val);
1847 pci_write_config_dword(pdev, offset, saved_val);
1848 if (retry-- <= 0)
1849 return;
1850
1851 pci_read_config_dword(pdev, offset, &val);
1852 if (val == saved_val)
1853 return;
1854
1855 mdelay(1);
1856 }
1857 }
1858
1859 static void pci_restore_config_space_range(struct pci_dev *pdev,
1860 int start, int end, int retry,
1861 bool force)
1862 {
1863 int index;
1864
1865 for (index = end; index >= start; index--)
1866 pci_restore_config_dword(pdev, 4 * index,
1867 pdev->saved_config_space[index],
1868 retry, force);
1869 }
1870
1871 static void pci_restore_config_space(struct pci_dev *pdev)
1872 {
1873 if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
1874 pci_restore_config_space_range(pdev, 10, 15, 0, false);
1875 /* Restore BARs before the command register. */
1876 pci_restore_config_space_range(pdev, 4, 9, 10, false);
1877 pci_restore_config_space_range(pdev, 0, 3, 0, false);
1878 } else if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
1879 pci_restore_config_space_range(pdev, 12, 15, 0, false);
1880
1881 /*
1882 * Force rewriting of prefetch registers to avoid S3 resume
1883 * issues on Intel PCI bridges that occur when these
1884 * registers are not explicitly written.
1885 */
1886 pci_restore_config_space_range(pdev, 9, 11, 0, true);
1887 pci_restore_config_space_range(pdev, 0, 8, 0, false);
1888 } else {
1889 pci_restore_config_space_range(pdev, 0, 15, 0, false);
1890 }
1891 }
1892
1893 static void pci_restore_rebar_state(struct pci_dev *pdev)
1894 {
1895 unsigned int pos, nbars, i;
1896 u32 ctrl;
1897
1898 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_REBAR);
1899 if (!pos)
1900 return;
1901
1902 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
1903 nbars = FIELD_GET(PCI_REBAR_CTRL_NBAR_MASK, ctrl);
1904
1905 for (i = 0; i < nbars; i++, pos += 8) {
1906 struct resource *res;
1907 int bar_idx, size;
1908
1909 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
1910 bar_idx = ctrl & PCI_REBAR_CTRL_BAR_IDX;
1911 res = pdev->resource + bar_idx;
1912 size = pci_rebar_bytes_to_size(resource_size(res));
1913 ctrl &= ~PCI_REBAR_CTRL_BAR_SIZE;
1914 ctrl |= FIELD_PREP(PCI_REBAR_CTRL_BAR_SIZE, size);
1915 pci_write_config_dword(pdev, pos + PCI_REBAR_CTRL, ctrl);
1916 }
1917 }
1918
1919 /**
1920 * pci_restore_state - Restore the saved state of a PCI device
1921 * @dev: PCI device that we're dealing with
1922 */
1923 void pci_restore_state(struct pci_dev *dev)
1924 {
1925 if (!dev->state_saved)
1926 return;
1927
1928 pci_restore_pcie_state(dev);
1929 pci_restore_pasid_state(dev);
1930 pci_restore_pri_state(dev);
1931 pci_restore_ats_state(dev);
1932 pci_restore_vc_state(dev);
1933 pci_restore_rebar_state(dev);
1934 pci_restore_dpc_state(dev);
1935 pci_restore_ptm_state(dev);
1936
1937 pci_aer_clear_status(dev);
1938 pci_restore_aer_state(dev);
1939
1940 pci_restore_config_space(dev);
1941
1942 pci_restore_pcix_state(dev);
1943 pci_restore_msi_state(dev);
1944
1945 /* Restore ACS and IOV configuration state */
1946 pci_enable_acs(dev);
1947 pci_restore_iov_state(dev);
1948
1949 dev->state_saved = false;
1950 }
1951 EXPORT_SYMBOL(pci_restore_state);
1952
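/*
 * Illustrative sketch (hypothetical "foo" driver, not taken from this
 * file): a minimal legacy suspend/resume pair built on the two helpers
 * above.  pci_restore_state() is a no-op unless pci_save_state() ran
 * first, because it checks dev->state_saved.
 *
 *	static int foo_suspend(struct pci_dev *pdev, pm_message_t state)
 *	{
 *		pci_save_state(pdev);
 *		pci_set_power_state(pdev, pci_choose_state(pdev, state));
 *		return 0;
 *	}
 *
 *	static int foo_resume(struct pci_dev *pdev)
 *	{
 *		pci_set_power_state(pdev, PCI_D0);
 *		pci_restore_state(pdev);
 *		return 0;
 *	}
 */
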
1953 struct pci_saved_state {
1954 u32 config_space[16];
1955 struct pci_cap_saved_data cap[];
1956 };
1957
1958 /**
1959 * pci_store_saved_state - Allocate and return an opaque struct containing
1960 * the device's saved state.
1961 * @dev: PCI device that we're dealing with
1962 *
1963 * Return NULL if there is no saved state or on error.
1964 */
1965 struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev)
1966 {
1967 struct pci_saved_state *state;
1968 struct pci_cap_saved_state *tmp;
1969 struct pci_cap_saved_data *cap;
1970 size_t size;
1971
1972 if (!dev->state_saved)
1973 return NULL;
1974
1975 size = sizeof(*state) + sizeof(struct pci_cap_saved_data);
1976
1977 hlist_for_each_entry(tmp, &dev->saved_cap_space, next)
1978 size += sizeof(struct pci_cap_saved_data) + tmp->cap.size;
1979
1980 state = kzalloc(size, GFP_KERNEL);
1981 if (!state)
1982 return NULL;
1983
1984 memcpy(state->config_space, dev->saved_config_space,
1985 sizeof(state->config_space));
1986
1987 cap = state->cap;
1988 hlist_for_each_entry(tmp, &dev->saved_cap_space, next) {
1989 size_t len = sizeof(struct pci_cap_saved_data) + tmp->cap.size;
1990 memcpy(cap, &tmp->cap, len);
1991 cap = (struct pci_cap_saved_data *)((u8 *)cap + len);
1992 }
1993 /* Empty cap_save terminates list */
1994
1995 return state;
1996 }
1997 EXPORT_SYMBOL_GPL(pci_store_saved_state);
1998
1999 /**
2000 * pci_load_saved_state - Reload the provided save state into struct pci_dev.
2001 * @dev: PCI device that we're dealing with
2002 * @state: Saved state returned from pci_store_saved_state()
2003 */
2004 int pci_load_saved_state(struct pci_dev *dev,
2005 struct pci_saved_state *state)
2006 {
2007 struct pci_cap_saved_data *cap;
2008
2009 dev->state_saved = false;
2010
2011 if (!state)
2012 return 0;
2013
2014 memcpy(dev->saved_config_space, state->config_space,
2015 sizeof(state->config_space));
2016
2017 cap = state->cap;
2018 while (cap->size) {
2019 struct pci_cap_saved_state *tmp;
2020
2021 tmp = _pci_find_saved_cap(dev, cap->cap_nr, cap->cap_extended);
2022 if (!tmp || tmp->cap.size != cap->size)
2023 return -EINVAL;
2024
2025 memcpy(tmp->cap.data, cap->data, tmp->cap.size);
2026 cap = (struct pci_cap_saved_data *)((u8 *)cap +
2027 sizeof(struct pci_cap_saved_data) + cap->size);
2028 }
2029
2030 dev->state_saved = true;
2031 return 0;
2032 }
2033 EXPORT_SYMBOL_GPL(pci_load_saved_state);
2034
2035 /**
2036 * pci_load_and_free_saved_state - Reload the saved state pointed to by @state,
2037 * and free the memory allocated for it.
2038 * @dev: PCI device that we're dealing with
2039 * @state: Pointer to saved state returned from pci_store_saved_state()
2040 */
2041 int pci_load_and_free_saved_state(struct pci_dev *dev,
2042 struct pci_saved_state **state)
2043 {
2044 int ret = pci_load_saved_state(dev, *state);
2045 kfree(*state);
2046 *state = NULL;
2047 return ret;
2048 }
2049 EXPORT_SYMBOL_GPL(pci_load_and_free_saved_state);
2050
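/*
 * Illustrative sketch (the reset step is a placeholder): stashing state
 * across an operation that clobbers config space, in the style of VFIO:
 *
 *	struct pci_saved_state *state;
 *
 *	pci_save_state(pdev);
 *	state = pci_store_saved_state(pdev);
 *	... reset or reprogram the device ...
 *	pci_load_and_free_saved_state(pdev, &state);
 *	pci_restore_state(pdev);
 */
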
2051 int __weak pcibios_enable_device(struct pci_dev *dev, int bars)
2052 {
2053 return pci_enable_resources(dev, bars);
2054 }
2055
2056 static int do_pci_enable_device(struct pci_dev *dev, int bars)
2057 {
2058 int err;
2059 struct pci_dev *bridge;
2060 u16 cmd;
2061 u8 pin;
2062
2063 err = pci_set_power_state(dev, PCI_D0);
2064 if (err < 0 && err != -EIO)
2065 return err;
2066
2067 bridge = pci_upstream_bridge(dev);
2068 if (bridge)
2069 pcie_aspm_powersave_config_link(bridge);
2070
2071 err = pcibios_enable_device(dev, bars);
2072 if (err < 0)
2073 return err;
2074 pci_fixup_device(pci_fixup_enable, dev);
2075
2076 if (dev->msi_enabled || dev->msix_enabled)
2077 return 0;
2078
2079 pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
2080 if (pin) {
2081 pci_read_config_word(dev, PCI_COMMAND, &cmd);
2082 if (cmd & PCI_COMMAND_INTX_DISABLE)
2083 pci_write_config_word(dev, PCI_COMMAND,
2084 cmd & ~PCI_COMMAND_INTX_DISABLE);
2085 }
2086
2087 return 0;
2088 }
2089
2090 /**
2091 * pci_reenable_device - Resume abandoned device
2092 * @dev: PCI device to be resumed
2093 *
2094 * NOTE: This function is a backend of pci_default_resume() and is not supposed
2095 * to be called by normal code; write a proper resume handler and use it instead.
2096 */
2097 int pci_reenable_device(struct pci_dev *dev)
2098 {
2099 if (pci_is_enabled(dev))
2100 return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1);
2101 return 0;
2102 }
2103 EXPORT_SYMBOL(pci_reenable_device);
2104
2105 static void pci_enable_bridge(struct pci_dev *dev)
2106 {
2107 struct pci_dev *bridge;
2108 int retval;
2109
2110 bridge = pci_upstream_bridge(dev);
2111 if (bridge)
2112 pci_enable_bridge(bridge);
2113
2114 if (pci_is_enabled(dev)) {
2115 if (!dev->is_busmaster)
2116 pci_set_master(dev);
2117 return;
2118 }
2119
2120 retval = pci_enable_device(dev);
2121 if (retval)
2122 pci_err(dev, "Error enabling bridge (%d), continuing\n",
2123 retval);
2124 pci_set_master(dev);
2125 }
2126
2127 static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags)
2128 {
2129 struct pci_dev *bridge;
2130 int err;
2131 int i, bars = 0;
2132
2133 /*
2134 * Power state could be unknown at this point, either due to a fresh
2135 * boot or a device removal call. So get the current power state
2136 * so that things like MSI message writing will behave as expected
2137 * (e.g. if the device really is in D0 at enable time).
2138 */
2139 pci_update_current_state(dev, dev->current_state);
2140
2141 if (atomic_inc_return(&dev->enable_cnt) > 1)
2142 return 0; /* already enabled */
2143
2144 bridge = pci_upstream_bridge(dev);
2145 if (bridge)
2146 pci_enable_bridge(bridge);
2147
2148 /* Collect BARs to enable; skip only the SR-IOV resources */
2149 for (i = 0; i <= PCI_ROM_RESOURCE; i++)
2150 if (dev->resource[i].flags & flags)
2151 bars |= (1 << i);
2152 for (i = PCI_BRIDGE_RESOURCES; i < DEVICE_COUNT_RESOURCE; i++)
2153 if (dev->resource[i].flags & flags)
2154 bars |= (1 << i);
2155
2156 err = do_pci_enable_device(dev, bars);
2157 if (err < 0)
2158 atomic_dec(&dev->enable_cnt);
2159 return err;
2160 }
2161
2162 /**
2163 * pci_enable_device_mem - Initialize a device for use with Memory space
2164 * @dev: PCI device to be initialized
2165 *
2166 * Initialize device before it's used by a driver. Ask low-level code
2167 * to enable Memory resources. Wake up the device if it was suspended.
2168 * Beware, this function can fail.
2169 */
2170 int pci_enable_device_mem(struct pci_dev *dev)
2171 {
2172 return pci_enable_device_flags(dev, IORESOURCE_MEM);
2173 }
2174 EXPORT_SYMBOL(pci_enable_device_mem);
2175
2176 /**
2177 * pci_enable_device - Initialize device before it's used by a driver.
2178 * @dev: PCI device to be initialized
2179 *
2180 * Initialize device before it's used by a driver. Ask low-level code
2181 * to enable I/O and memory. Wake up the device if it was suspended.
2182 * Beware, this function can fail.
2183 *
2184 * Note we don't actually enable the device many times if we call
2185 * this function repeatedly (we just increment the count).
2186 */
2187 int pci_enable_device(struct pci_dev *dev)
2188 {
2189 return pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO);
2190 }
2191 EXPORT_SYMBOL(pci_enable_device);
2192
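/*
 * Illustrative sketch (hypothetical probe function): the common enable
 * pattern in a driver's probe path.
 *
 *	static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 *	{
 *		int err;
 *
 *		err = pci_enable_device(pdev);
 *		if (err)
 *			return err;
 *		pci_set_master(pdev);
 *		return 0;
 *	}
 */
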
2193 /**
2194 * pcibios_device_add - provide arch specific hooks when adding device dev
2195 * @dev: the PCI device being added
2196 *
2197 * Permits the platform to provide architecture specific functionality when
2198 * devices are added. This is the default implementation. Architecture
2199 * implementations can override this.
2200 */
2201 int __weak pcibios_device_add(struct pci_dev *dev)
2202 {
2203 return 0;
2204 }
2205
2206 /**
2207 * pcibios_release_device - provide arch specific hooks when releasing
2208 * device dev
2209 * @dev: the PCI device being released
2210 *
2211 * Permits the platform to provide architecture specific functionality when
2212 * devices are released. This is the default implementation. Architecture
2213 * implementations can override this.
2214 */
2215 void __weak pcibios_release_device(struct pci_dev *dev) {}
2216
2217 /**
2218 * pcibios_disable_device - disable arch specific PCI resources for device dev
2219 * @dev: the PCI device to disable
2220 *
2221 * Disables architecture specific PCI resources for the device. This
2222 * is the default implementation. Architecture implementations can
2223 * override this.
2224 */
2225 void __weak pcibios_disable_device(struct pci_dev *dev) {}
2226
2227 static void do_pci_disable_device(struct pci_dev *dev)
2228 {
2229 u16 pci_command;
2230
2231 pci_read_config_word(dev, PCI_COMMAND, &pci_command);
2232 if (pci_command & PCI_COMMAND_MASTER) {
2233 pci_command &= ~PCI_COMMAND_MASTER;
2234 pci_write_config_word(dev, PCI_COMMAND, pci_command);
2235 }
2236
2237 pcibios_disable_device(dev);
2238 }
2239
2240 /**
2241 * pci_disable_enabled_device - Disable device without updating enable_cnt
2242 * @dev: PCI device to disable
2243 *
2244 * NOTE: This function is a backend of PCI power management routines and is
2245 * not supposed to be called by drivers.
2246 */
2247 void pci_disable_enabled_device(struct pci_dev *dev)
2248 {
2249 if (pci_is_enabled(dev))
2250 do_pci_disable_device(dev);
2251 }
2252
2253 /**
2254 * pci_disable_device - Disable PCI device after use
2255 * @dev: PCI device to be disabled
2256 *
2257 * Signal to the system that the PCI device is not in use by the system
2258 * anymore. This only involves disabling PCI bus-mastering, if active.
2259 *
2260 * Note we don't actually disable the device until all callers of
2261 * pci_enable_device() have called pci_disable_device().
2262 */
2263 void pci_disable_device(struct pci_dev *dev)
2264 {
2265 dev_WARN_ONCE(&dev->dev, atomic_read(&dev->enable_cnt) <= 0,
2266 "disabling already-disabled device");
2267
2268 if (atomic_dec_return(&dev->enable_cnt) != 0)
2269 return;
2270
2271 do_pci_disable_device(dev);
2272
2273 dev->is_busmaster = 0;
2274 }
2275 EXPORT_SYMBOL(pci_disable_device);
2276
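/*
 * Illustrative example of the nesting described above (values assume a
 * device that starts disabled):
 *
 *	pci_enable_device(pdev);	enable_cnt 0 -> 1, device enabled
 *	pci_enable_device(pdev);	enable_cnt 1 -> 2, no further work
 *	pci_disable_device(pdev);	enable_cnt 2 -> 1, still enabled
 *	pci_disable_device(pdev);	enable_cnt 1 -> 0, actually disabled
 */
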
2277 /**
2278 * pcibios_set_pcie_reset_state - set reset state for device dev
2279 * @dev: the PCIe device to reset
2280 * @state: Reset state to enter into
2281 *
2282 * Set the PCIe reset state for the device. This is the default
2283 * implementation. Architecture implementations can override this.
2284 */
2285 int __weak pcibios_set_pcie_reset_state(struct pci_dev *dev,
2286 enum pcie_reset_state state)
2287 {
2288 return -EINVAL;
2289 }
2290
2291 /**
2292 * pci_set_pcie_reset_state - set reset state for device dev
2293 * @dev: the PCIe device to reset
2294 * @state: Reset state to enter into
2295 *
2296 * Sets the PCI reset state for the device.
2297 */
2298 int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
2299 {
2300 return pcibios_set_pcie_reset_state(dev, state);
2301 }
2302 EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);
2303
2304 #ifdef CONFIG_PCIEAER
2305 void pcie_clear_device_status(struct pci_dev *dev)
2306 {
2307 u16 sta;
2308
2309 pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &sta);
2310 pcie_capability_write_word(dev, PCI_EXP_DEVSTA, sta);
2311 }
2312 #endif
2313
2314 /**
2315 * pcie_clear_root_pme_status - Clear root port PME interrupt status.
2316 * @dev: PCIe root port or event collector.
2317 */
2318 void pcie_clear_root_pme_status(struct pci_dev *dev)
2319 {
2320 pcie_capability_set_dword(dev, PCI_EXP_RTSTA, PCI_EXP_RTSTA_PME);
2321 }
2322
2323 /**
2324 * pci_check_pme_status - Check if given device has generated PME.
2325 * @dev: Device to check.
2326 *
2327 * Check the PME status of the device and if set, clear it and clear PME enable
2328 * (if set). Return 'true' if PME status and PME enable were both set or
2329 * 'false' otherwise.
2330 */
2331 bool pci_check_pme_status(struct pci_dev *dev)
2332 {
2333 int pmcsr_pos;
2334 u16 pmcsr;
2335 bool ret = false;
2336
2337 if (!dev->pm_cap)
2338 return false;
2339
2340 pmcsr_pos = dev->pm_cap + PCI_PM_CTRL;
2341 pci_read_config_word(dev, pmcsr_pos, &pmcsr);
2342 if (!(pmcsr & PCI_PM_CTRL_PME_STATUS))
2343 return false;
2344
2345 /* Clear PME status. */
2346 pmcsr |= PCI_PM_CTRL_PME_STATUS;
2347 if (pmcsr & PCI_PM_CTRL_PME_ENABLE) {
2348 /* Disable PME to avoid interrupt flood. */
2349 pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
2350 ret = true;
2351 }
2352
2353 pci_write_config_word(dev, pmcsr_pos, pmcsr);
2354
2355 return ret;
2356 }
2357
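/*
 * Worked example (hypothetical PMCSR value): if the register reads 0x8100
 * (PME_Status bit 15 and PME_Enable bit 8 both set), the function above
 * writes back 0x8000, which clears the RW1C PME_Status bit, drops
 * PME_Enable and returns 'true'.
 */
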
2358 /**
2359 * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set.
2360 * @dev: Device to handle.
2361 * @pme_poll_reset: Whether or not to reset the device's pme_poll flag.
2362 *
2363 * Check if @dev has generated PME and queue a resume request for it in that
2364 * case.
2365 */
2366 static int pci_pme_wakeup(struct pci_dev *dev, void *pme_poll_reset)
2367 {
2368 if (pme_poll_reset && dev->pme_poll)
2369 dev->pme_poll = false;
2370
2371 if (pci_check_pme_status(dev)) {
2372 pci_wakeup_event(dev);
2373 pm_request_resume(&dev->dev);
2374 }
2375 return 0;
2376 }
2377
2378 /**
2379 * pci_pme_wakeup_bus - Walk given bus and wake up devices on it, if necessary.
2380 * @bus: Top bus of the subtree to walk.
2381 */
2382 void pci_pme_wakeup_bus(struct pci_bus *bus)
2383 {
2384 if (bus)
2385 pci_walk_bus(bus, pci_pme_wakeup, (void *)true);
2386 }
2387
2388
2389 /**
2390 * pci_pme_capable - check the capability of a PCI device to generate PME#
2391 * @dev: PCI device to handle.
2392 * @state: PCI state from which device will issue PME#.
2393 */
2394 bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
2395 {
2396 if (!dev->pm_cap)
2397 return false;
2398
2399 return !!(dev->pme_support & (1 << state));
2400 }
2401 EXPORT_SYMBOL(pci_pme_capable);
2402
2403 static void pci_pme_list_scan(struct work_struct *work)
2404 {
2405 struct pci_pme_device *pme_dev, *n;
2406
2407 mutex_lock(&pci_pme_list_mutex);
2408 list_for_each_entry_safe(pme_dev, n, &pci_pme_list, list) {
2409 struct pci_dev *pdev = pme_dev->dev;
2410
2411 if (pdev->pme_poll) {
2412 struct pci_dev *bridge = pdev->bus->self;
2413 struct device *dev = &pdev->dev;
2414 struct device *bdev = bridge ? &bridge->dev : NULL;
2415 int bref = 0;
2416
2417 /*
2418 * If we have a bridge, it should be in an active/D0
2419 * state or the configuration space of subordinate
2420 * devices may not be accessible or stable over the
2421 * course of the call.
2422 */
2423 if (bdev) {
2424 bref = pm_runtime_get_if_active(bdev);
2425 if (!bref)
2426 continue;
2427
2428 if (bridge->current_state != PCI_D0)
2429 goto put_bridge;
2430 }
2431
2432 /*
2433 * The device itself should be suspended but config
2434 * space must be accessible, therefore it cannot be in
2435 * D3cold.
2436 */
2437 if (pm_runtime_suspended(dev) &&
2438 pdev->current_state != PCI_D3cold)
2439 pci_pme_wakeup(pdev, NULL);
2440
2441 put_bridge:
2442 if (bref > 0)
2443 pm_runtime_put(bdev);
2444 } else {
2445 list_del(&pme_dev->list);
2446 kfree(pme_dev);
2447 }
2448 }
2449 if (!list_empty(&pci_pme_list))
2450 queue_delayed_work(system_freezable_wq, &pci_pme_work,
2451 msecs_to_jiffies(PME_TIMEOUT));
2452 mutex_unlock(&pci_pme_list_mutex);
2453 }
2454
2455 static void __pci_pme_active(struct pci_dev *dev, bool enable)
2456 {
2457 u16 pmcsr;
2458
2459 if (!dev->pme_support)
2460 return;
2461
2462 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
2463 /* Clear PME_Status by writing 1 to it and enable PME# */
2464 pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;
2465 if (!enable)
2466 pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
2467
2468 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
2469 }
2470
2471 /**
2472 * pci_pme_restore - Restore PME configuration after config space restore.
2473 * @dev: PCI device to update.
2474 */
2475 void pci_pme_restore(struct pci_dev *dev)
2476 {
2477 u16 pmcsr;
2478
2479 if (!dev->pme_support)
2480 return;
2481
2482 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
2483 if (dev->wakeup_prepared) {
2484 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
2485 pmcsr &= ~PCI_PM_CTRL_PME_STATUS;
2486 } else {
2487 pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
2488 pmcsr |= PCI_PM_CTRL_PME_STATUS;
2489 }
2490 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
2491 }
2492
2493 /**
2494 * pci_pme_active - enable or disable PCI device's PME# function
2495 * @dev: PCI device to handle.
2496 * @enable: 'true' to enable PME# generation; 'false' to disable it.
2497 *
2498 * The caller must verify that the device is capable of generating PME# before
2499 * calling this function with @enable equal to 'true'.
2500 */
2501 void pci_pme_active(struct pci_dev *dev, bool enable)
2502 {
2503 __pci_pme_active(dev, enable);
2504
2505 /*
2506 * PCI (as opposed to PCIe) PME requires that the device have
2507 * its PME# line hooked up correctly. Not all hardware vendors
2508 * do this, so the PME never gets delivered and the device
2509 * remains asleep. The easiest way around this is to
2510 * periodically walk the list of suspended devices and check
2511 * whether any have their PME flag set. The assumption is that
2512 * we'll wake up often enough anyway that this won't be a huge
2513 * hit, and the power savings from the devices will still be a
2514 * win.
2515 *
2516 * Although PCIe uses an in-band PME message instead of the PME# line
2517 * to report PME, PME does not work for some PCIe devices in
2518 * reality. For example, there are devices that set their PME
2519 * status bits, but don't really bother to send a PME message;
2520 * there are PCI Express Root Ports that don't bother to
2521 * trigger interrupts when they receive PME messages from the
2522 * devices below. So PME poll is used for PCIe devices too.
2523 */
2524
2525 if (dev->pme_poll) {
2526 struct pci_pme_device *pme_dev;
2527 if (enable) {
2528 pme_dev = kmalloc(sizeof(struct pci_pme_device),
2529 GFP_KERNEL);
2530 if (!pme_dev) {
2531 pci_warn(dev, "can't enable PME#\n");
2532 return;
2533 }
2534 pme_dev->dev = dev;
2535 mutex_lock(&pci_pme_list_mutex);
2536 list_add(&pme_dev->list, &pci_pme_list);
2537 if (list_is_singular(&pci_pme_list))
2538 queue_delayed_work(system_freezable_wq,
2539 &pci_pme_work,
2540 msecs_to_jiffies(PME_TIMEOUT));
2541 mutex_unlock(&pci_pme_list_mutex);
2542 } else {
2543 mutex_lock(&pci_pme_list_mutex);
2544 list_for_each_entry(pme_dev, &pci_pme_list, list) {
2545 if (pme_dev->dev == dev) {
2546 list_del(&pme_dev->list);
2547 kfree(pme_dev);
2548 break;
2549 }
2550 }
2551 mutex_unlock(&pci_pme_list_mutex);
2552 }
2553 }
2554
2555 pci_dbg(dev, "PME# %s\n", enable ? "enabled" : "disabled");
2556 }
2557 EXPORT_SYMBOL(pci_pme_active);
2558
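/*
 * Illustrative sketch: as required above, callers check capability first
 * (hypothetical call site):
 *
 *	if (pci_pme_capable(pdev, PCI_D3hot))
 *		pci_pme_active(pdev, true);
 */
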
2559 /**
2560 * __pci_enable_wake - enable PCI device as wakeup event source
2561 * @dev: PCI device affected
2562 * @state: PCI state from which device will issue wakeup events
2563 * @enable: True to enable event generation; false to disable
2564 *
2565 * This enables the device as a wakeup event source, or disables it.
2566 * When such events involve platform-specific hooks, those hooks are
2567 * called automatically by this routine.
2568 *
2569 * Devices with legacy power management (no standard PCI PM capabilities)
2570 * always require such platform hooks.
2571 *
2572 * RETURN VALUE:
2573 * 0 is returned on success
2574 * -EINVAL is returned if device is not supposed to wake up the system
2575 * Error code depending on the platform is returned if both the platform and
2576 * the native mechanism fail to enable the generation of wake-up events
2577 */
2578 static int __pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable)
2579 {
2580 int ret = 0;
2581
2582 /*
2583 * Bridges that are not directly power-manageable only signal
2584 * wakeup on behalf of subordinate devices, which is set up
2585 * elsewhere, so skip them. However, bridges that are
2586 * power-manageable may signal wakeup for themselves (for example,
2587 * on a hotplug event) and they need to be covered here.
2588 */
2589 if (!pci_power_manageable(dev))
2590 return 0;
2591
2592 /* Don't do the same thing twice in a row for one device. */
2593 if (!!enable == !!dev->wakeup_prepared)
2594 return 0;
2595
2596 /*
2597 * According to "PCI System Architecture" 4th ed. by Tom Shanley & Don
2598 * Anderson we should be doing PME# wake enable followed by ACPI wake
2599 * enable. To disable wake-up we call the platform first, for symmetry.
2600 */
2601
2602 if (enable) {
2603 int error;
2604
2605 /*
2606 * Enable PME signaling if the device can signal PME from
2607 * D3cold regardless of whether or not it can signal PME from
2608 * the current target state, because that will allow it to
2609 * signal PME when the hierarchy above it goes into D3cold and
2610 * the device itself ends up in D3cold as a result of that.
2611 */
2612 if (pci_pme_capable(dev, state) || pci_pme_capable(dev, PCI_D3cold))
2613 pci_pme_active(dev, true);
2614 else
2615 ret = 1;
2616 error = platform_pci_set_wakeup(dev, true);
2617 if (ret)
2618 ret = error;
2619 if (!ret)
2620 dev->wakeup_prepared = true;
2621 } else {
2622 platform_pci_set_wakeup(dev, false);
2623 pci_pme_active(dev, false);
2624 dev->wakeup_prepared = false;
2625 }
2626
2627 return ret;
2628 }
2629
2630 /**
2631 * pci_enable_wake - change wakeup settings for a PCI device
2632 * @pci_dev: Target device
2633 * @state: PCI state from which device will issue wakeup events
2634 * @enable: Whether or not to enable event generation
2635 *
2636 * If @enable is set, check device_may_wakeup() for the device before calling
2637 * __pci_enable_wake() for it.
2638 */
2639 int pci_enable_wake(struct pci_dev *pci_dev, pci_power_t state, bool enable)
2640 {
2641 if (enable && !device_may_wakeup(&pci_dev->dev))
2642 return -EINVAL;
2643
2644 return __pci_enable_wake(pci_dev, state, enable);
2645 }
2646 EXPORT_SYMBOL(pci_enable_wake);
2647
2648 /**
2649 * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold
2650 * @dev: PCI device to prepare
2651 * @enable: True to enable wake-up event generation; false to disable
2652 *
2653 * Many drivers want the device to wake up the system from D3_hot or D3_cold
2654 * and this function allows them to set that up cleanly - pci_enable_wake()
2655 * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI
2656 * ordering constraints.
2657 *
2658 * This function only returns error code if the device is not allowed to wake
2659 * up the system from sleep or it is not capable of generating PME# from both
2660 * D3_hot and D3_cold and the platform is unable to enable wake-up power for it.
2661 */
2662 int pci_wake_from_d3(struct pci_dev *dev, bool enable)
2663 {
2664 return pci_pme_capable(dev, PCI_D3cold) ?
2665 pci_enable_wake(dev, PCI_D3cold, enable) :
2666 pci_enable_wake(dev, PCI_D3hot, enable);
2667 }
2668 EXPORT_SYMBOL(pci_wake_from_d3);
2669
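/*
 * Illustrative sketch (hypothetical "wol_enabled" flag): network drivers
 * typically wire their Wake-on-LAN setting straight to this helper:
 *
 *	pci_wake_from_d3(pdev, wol_enabled);
 */
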
2670 /**
2671 * pci_target_state - find an appropriate low power state for a given PCI dev
2672 * @dev: PCI device
2673 * @wakeup: Whether or not wakeup functionality will be enabled for the device.
2674 *
2675 * Use underlying platform code to find a supported low power state for @dev.
2676 * If the platform can't manage @dev, return the deepest state from which it
2677 * can generate wake events, based on any available PME info.
2678 */
2679 static pci_power_t pci_target_state(struct pci_dev *dev, bool wakeup)
2680 {
2681 if (platform_pci_power_manageable(dev)) {
2682 /*
2683 * Call the platform to find the target state for the device.
2684 */
2685 pci_power_t state = platform_pci_choose_state(dev);
2686
2687 switch (state) {
2688 case PCI_POWER_ERROR:
2689 case PCI_UNKNOWN:
2690 return PCI_D3hot;
2691
2692 case PCI_D1:
2693 case PCI_D2:
2694 if (pci_no_d1d2(dev))
2695 return PCI_D3hot;
2696 }
2697
2698 return state;
2699 }
2700
2701 /*
2702 * If the device is in D3cold even though it's not power-manageable by
2703 * the platform, it may have been powered down by non-standard means.
2704 * Best to let it slumber.
2705 */
2706 if (dev->current_state == PCI_D3cold)
2707 return PCI_D3cold;
2708 else if (!dev->pm_cap)
2709 return PCI_D0;
2710
2711 if (wakeup && dev->pme_support) {
2712 pci_power_t state = PCI_D3hot;
2713
2714 /*
2715 * Find the deepest state from which the device can generate
2716 * PME#.
2717 */
2718 while (state && !(dev->pme_support & (1 << state)))
2719 state--;
2720
2721 if (state)
2722 return state;
2723 else if (dev->pme_support & 1)
2724 return PCI_D0;
2725 }
2726
2727 return PCI_D3hot;
2728 }
2729
2730 /**
2731 * pci_prepare_to_sleep - prepare PCI device for system-wide transition
2732 * into a sleep state
2733 * @dev: Device to handle.
2734 *
2735 * Choose the power state appropriate for the device depending on whether
2736 * it can wake up the system and/or is power manageable by the platform
2737 * (PCI_D3hot is the default) and put the device into that state.
2738 */
2739 int pci_prepare_to_sleep(struct pci_dev *dev)
2740 {
2741 bool wakeup = device_may_wakeup(&dev->dev);
2742 pci_power_t target_state = pci_target_state(dev, wakeup);
2743 int error;
2744
2745 if (target_state == PCI_POWER_ERROR)
2746 return -EIO;
2747
2748 pci_enable_wake(dev, target_state, wakeup);
2749
2750 error = pci_set_power_state(dev, target_state);
2751
2752 if (error)
2753 pci_enable_wake(dev, target_state, false);
2754
2755 return error;
2756 }
2757 EXPORT_SYMBOL(pci_prepare_to_sleep);
2758
2759 /**
2760 * pci_back_from_sleep - turn PCI device on during system-wide transition
2761 * into working state
2762 * @dev: Device to handle.
2763 *
2764 * Disable device's system wake-up capability and put it into D0.
2765 */
2766 int pci_back_from_sleep(struct pci_dev *dev)
2767 {
2768 int ret = pci_set_power_state(dev, PCI_D0);
2769
2770 if (ret)
2771 return ret;
2772
2773 pci_enable_wake(dev, PCI_D0, false);
2774 return 0;
2775 }
2776 EXPORT_SYMBOL(pci_back_from_sleep);
2777
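/*
 * Illustrative sketch: the two helpers above pair naturally in a
 * hypothetical driver's legacy PM hooks:
 *
 *	suspend:	return pci_prepare_to_sleep(pdev);
 *	resume:		return pci_back_from_sleep(pdev);
 */
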
2778 /**
2779 * pci_finish_runtime_suspend - Carry out PCI-specific part of runtime suspend.
2780 * @dev: PCI device being suspended.
2781 *
2782 * Prepare @dev to generate wake-up events at run time and put it into a low
2783 * power state.
2784 */
2785 int pci_finish_runtime_suspend(struct pci_dev *dev)
2786 {
2787 pci_power_t target_state;
2788 int error;
2789
2790 target_state = pci_target_state(dev, device_can_wakeup(&dev->dev));
2791 if (target_state == PCI_POWER_ERROR)
2792 return -EIO;
2793
2794 __pci_enable_wake(dev, target_state, pci_dev_run_wake(dev));
2795
2796 error = pci_set_power_state(dev, target_state);
2797
2798 if (error)
2799 pci_enable_wake(dev, target_state, false);
2800
2801 return error;
2802 }
2803
2804 /**
2805 * pci_dev_run_wake - Check if device can generate run-time wake-up events.
2806 * @dev: Device to check.
2807 *
2808 * Return true if the device itself is capable of generating wake-up events
2809 * (through the platform or using the native PCIe PME) or if the device supports
2810 * PME and one of its upstream bridges can generate wake-up events.
2811 */
2812 bool pci_dev_run_wake(struct pci_dev *dev)
2813 {
2814 struct pci_bus *bus = dev->bus;
2815
2816 if (!dev->pme_support)
2817 return false;
2818
2819 /* PME-capable in principle, but not from the target power state */
2820 if (!pci_pme_capable(dev, pci_target_state(dev, true)))
2821 return false;
2822
2823 if (device_can_wakeup(&dev->dev))
2824 return true;
2825
2826 while (bus->parent) {
2827 struct pci_dev *bridge = bus->self;
2828
2829 if (device_can_wakeup(&bridge->dev))
2830 return true;
2831
2832 bus = bus->parent;
2833 }
2834
2835 /* We have reached the root bus. */
2836 if (bus->bridge)
2837 return device_can_wakeup(bus->bridge);
2838
2839 return false;
2840 }
2841 EXPORT_SYMBOL_GPL(pci_dev_run_wake);
2842
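/*
 * Illustrative sketch (a pattern used by some drivers; hypothetical call
 * site): only let the device runtime-suspend if it can wake itself up:
 *
 *	if (pci_dev_run_wake(pdev))
 *		pm_runtime_put_noidle(&pdev->dev);
 */
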
2843 /**
2844 * pci_dev_need_resume - Check if it is necessary to resume the device.
2845 * @pci_dev: Device to check.
2846 *
2847 * Return 'true' if the device is not runtime-suspended, if it has to be
2848 * reconfigured due to a difference in wakeup settings between system and
2849 * runtime suspend, or if its current power state is unsuitable for the upcoming
2850 * (system-wide) transition.
2851 */
2852 bool pci_dev_need_resume(struct pci_dev *pci_dev)
2853 {
2854 struct device *dev = &pci_dev->dev;
2855 pci_power_t target_state;
2856
2857 if (!pm_runtime_suspended(dev) || platform_pci_need_resume(pci_dev))
2858 return true;
2859
2860 target_state = pci_target_state(pci_dev, device_may_wakeup(dev));
2861
2862 /*
2863 * If the earlier platform check has not triggered, D3cold is just power
2864 * removal on top of D3hot, so no need to resume the device in that
2865 * case.
2866 */
2867 return target_state != pci_dev->current_state &&
2868 target_state != PCI_D3cold &&
2869 pci_dev->current_state != PCI_D3hot;
2870 }
2871
2872 /**
2873 * pci_dev_adjust_pme - Adjust PME setting for a suspended device.
2874 * @pci_dev: Device to check.
2875 *
2876 * If the device is suspended and it is not configured for system wakeup,
2877 * disable PME for it to prevent it from waking up the system unnecessarily.
2878 *
2879 * Note that if the device's power state is D3cold and the platform check in
2880 * pci_dev_need_resume() has not triggered, the device's configuration need not
2881 * be changed.
2882 */
2883 void pci_dev_adjust_pme(struct pci_dev *pci_dev)
2884 {
2885 struct device *dev = &pci_dev->dev;
2886
2887 spin_lock_irq(&dev->power.lock);
2888
2889 if (pm_runtime_suspended(dev) && !device_may_wakeup(dev) &&
2890 pci_dev->current_state < PCI_D3cold)
2891 __pci_pme_active(pci_dev, false);
2892
2893 spin_unlock_irq(&dev->power.lock);
2894 }
2895
2896 /**
2897 * pci_dev_complete_resume - Finalize resume from system sleep for a device.
2898 * @pci_dev: Device to handle.
2899 *
2900 * If the device is runtime suspended and wakeup-capable, enable PME for it as
2901 * it might have been disabled during the prepare phase of system suspend if
2902 * the device was not configured for system wakeup.
2903 */
2904 void pci_dev_complete_resume(struct pci_dev *pci_dev)
2905 {
2906 struct device *dev = &pci_dev->dev;
2907
2908 if (!pci_dev_run_wake(pci_dev))
2909 return;
2910
2911 spin_lock_irq(&dev->power.lock);
2912
2913 if (pm_runtime_suspended(dev) && pci_dev->current_state < PCI_D3cold)
2914 __pci_pme_active(pci_dev, true);
2915
2916 spin_unlock_irq(&dev->power.lock);
2917 }
2918
2919 /**
2920 * pci_choose_state - Choose the power state of a PCI device.
2921 * @dev: Target PCI device.
2922 * @state: Target state for the whole system.
2923 *
2924 * Returns PCI power state suitable for @dev and @state.
2925 */
2926 pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
2927 {
2928 if (state.event == PM_EVENT_ON)
2929 return PCI_D0;
2930
2931 return pci_target_state(dev, false);
2932 }
2933 EXPORT_SYMBOL(pci_choose_state);
2934
2935 void pci_config_pm_runtime_get(struct pci_dev *pdev)
2936 {
2937 struct device *dev = &pdev->dev;
2938 struct device *parent = dev->parent;
2939
2940 if (parent)
2941 pm_runtime_get_sync(parent);
2942 pm_runtime_get_noresume(dev);
2943 /*
2944 * pdev->current_state is set to PCI_D3cold during suspending,
2945 * so wait until suspending completes
2946 */
2947 pm_runtime_barrier(dev);
2948 /*
2949 * Only need to resume devices in D3cold, because config
2950 * registers are still accessible for devices suspended but
2951 * not in D3cold.
2952 */
2953 if (pdev->current_state == PCI_D3cold)
2954 pm_runtime_resume(dev);
2955 }
2956
2957 void pci_config_pm_runtime_put(struct pci_dev *pdev)
2958 {
2959 struct device *dev = &pdev->dev;
2960 struct device *parent = dev->parent;
2961
2962 pm_runtime_put(dev);
2963 if (parent)
2964 pm_runtime_put_sync(parent);
2965 }
2966
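/*
 * Illustrative sketch ("pos"/"val" are placeholders): bracketing a config
 * space access so a D3cold device is resumed first, as the sysfs config
 * accessors do:
 *
 *	pci_config_pm_runtime_get(pdev);
 *	pci_read_config_dword(pdev, pos, &val);
 *	pci_config_pm_runtime_put(pdev);
 */
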
2967 static const struct dmi_system_id bridge_d3_blacklist[] = {
2968 #ifdef CONFIG_X86
2969 {
2970 /*
2971 * Gigabyte X299 root port is not marked as hotplug capable
2972 * which allows Linux to power manage it. However, this
2973 * confuses the BIOS SMI handler so don't power manage root
2974 * ports on that system.
2975 */
2976 .ident = "X299 DESIGNARE EX-CF",
2977 .matches = {
2978 DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
2979 DMI_MATCH(DMI_BOARD_NAME, "X299 DESIGNARE EX-CF"),
2980 },
2981 },
2982 {
2983 /*
2984 * Downstream device is not accessible after putting a root port
2985 * into D3cold and back into D0 on Elo Continental Z2 board
2986 */
2987 .ident = "Elo Continental Z2",
2988 .matches = {
2989 DMI_MATCH(DMI_BOARD_VENDOR, "Elo Touch Solutions"),
2990 DMI_MATCH(DMI_BOARD_NAME, "Geminilake"),
2991 DMI_MATCH(DMI_BOARD_VERSION, "Continental Z2"),
2992 },
2993 },
2994 {
2995 /*
2996 * Changing the power state of the root port the dGPU is connected to fails
2997 * https://gitlab.freedesktop.org/drm/amd/-/issues/3229
2998 */
2999 .ident = "Hewlett-Packard HP Pavilion 17 Notebook PC/1972",
3000 .matches = {
3001 DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
3002 DMI_MATCH(DMI_BOARD_NAME, "1972"),
3003 DMI_MATCH(DMI_BOARD_VERSION, "95.33"),
3004 },
3005 },
3006 #endif
3007 { }
3008 };
3009
3010 /**
3011 * pci_bridge_d3_possible - Is it possible to put the bridge into D3
3012 * @bridge: Bridge to check
3013 *
3014 * This function checks if it is possible to move the bridge to D3.
3015 * Currently we only allow D3 for recent enough PCIe ports and Thunderbolt.
3016 */
3017 bool pci_bridge_d3_possible(struct pci_dev *bridge)
3018 {
3019 if (!pci_is_pcie(bridge))
3020 return false;
3021
3022 switch (pci_pcie_type(bridge)) {
3023 case PCI_EXP_TYPE_ROOT_PORT:
3024 case PCI_EXP_TYPE_UPSTREAM:
3025 case PCI_EXP_TYPE_DOWNSTREAM:
3026 if (pci_bridge_d3_disable)
3027 return false;
3028
3029 /*
3030 * Hotplug ports handled by firmware in System Management Mode
3031 * may not be put into D3 by the OS (Thunderbolt on non-Macs).
3032 */
3033 if (bridge->is_hotplug_bridge && !pciehp_is_native(bridge))
3034 return false;
3035
3036 if (pci_bridge_d3_force)
3037 return true;
3038
3039 /* Even the oldest 2010 Thunderbolt controller supports D3. */
3040 if (bridge->is_thunderbolt)
3041 return true;
3042
3043 /* Platform might know better if the bridge supports D3 */
3044 if (platform_pci_bridge_d3(bridge))
3045 return true;
3046
3047 /*
3048 * Hotplug ports handled natively by the OS were not validated
3049 * by vendors for runtime D3 at least until 2018 because there
3050 * was no OS support.
3051 */
3052 if (bridge->is_hotplug_bridge)
3053 return false;
3054
3055 if (dmi_check_system(bridge_d3_blacklist))
3056 return false;
3057
3058 /*
3059 * It should be safe to put PCIe ports from 2015 or newer
3060 * to D3.
3061 */
3062 if (dmi_get_bios_year() >= 2015)
3063 return true;
3064 break;
3065 }
3066
3067 return false;
3068 }
3069
3070 static int pci_dev_check_d3cold(struct pci_dev *dev, void *data)
3071 {
3072 bool *d3cold_ok = data;
3073
3074 if (/* The device needs to be allowed to go D3cold ... */
3075 dev->no_d3cold || !dev->d3cold_allowed ||
3076
3077 /* ... and if it is wakeup capable to do so from D3cold. */
3078 (device_may_wakeup(&dev->dev) &&
3079 !pci_pme_capable(dev, PCI_D3cold)) ||
3080
3081 /* If it is a bridge it must be allowed to go to D3. */
3082 !pci_power_manageable(dev))
3083
3084 *d3cold_ok = false;
3085
3086 return !*d3cold_ok;
3087 }
3088
3089 /**
3090 * pci_bridge_d3_update - Update bridge D3 capabilities
3091 * @dev: PCI device which is changed
3092 *
3093 * Update upstream bridge PM capabilities depending on whether the
3094 * device PM configuration was changed or the device is being removed. The
3095 * change is also propagated upstream.
3096 */
3097 void pci_bridge_d3_update(struct pci_dev *dev)
3098 {
3099 bool remove = !device_is_registered(&dev->dev);
3100 struct pci_dev *bridge;
3101 bool d3cold_ok = true;
3102
3103 bridge = pci_upstream_bridge(dev);
3104 if (!bridge || !pci_bridge_d3_possible(bridge))
3105 return;
3106
3107 /*
3108 * If D3 is currently allowed for the bridge, removing one of its
3109 * children won't change that.
3110 */
3111 if (remove && bridge->bridge_d3)
3112 return;
3113
3114 /*
3115 * If D3 is currently allowed for the bridge and a child is added or
3116 * changed, disallowance of D3 can only be caused by that child, so
3117 * we only need to check that single device, not any of its siblings.
3118 *
3119 * If D3 is currently not allowed for the bridge, checking the device
3120 * first may allow us to skip checking its siblings.
3121 */
3122 if (!remove)
3123 pci_dev_check_d3cold(dev, &d3cold_ok);
3124
3125 /*
3126 * If D3 is currently not allowed for the bridge, this may be caused
3127 * either by the device being changed/removed or any of its siblings,
3128 * so we need to go through all children to find out if one of them
3129 * continues to block D3.
3130 */
3131 if (d3cold_ok && !bridge->bridge_d3)
3132 pci_walk_bus(bridge->subordinate, pci_dev_check_d3cold,
3133 &d3cold_ok);
3134
3135 if (bridge->bridge_d3 != d3cold_ok) {
3136 bridge->bridge_d3 = d3cold_ok;
3137 /* Propagate change to upstream bridges */
3138 pci_bridge_d3_update(bridge);
3139 }
3140 }
3141
3142 /**
3143 * pci_d3cold_enable - Enable D3cold for device
3144 * @dev: PCI device to handle
3145 *
3146 * This function can be used in drivers to enable D3cold for the device
3147 * they handle. It also updates upstream PCI bridge PM capabilities
3148 * accordingly.
3149 */
3150 void pci_d3cold_enable(struct pci_dev *dev)
3151 {
3152 if (dev->no_d3cold) {
3153 dev->no_d3cold = false;
3154 pci_bridge_d3_update(dev);
3155 }
3156 }
3157 EXPORT_SYMBOL_GPL(pci_d3cold_enable);
3158
3159 /**
3160 * pci_d3cold_disable - Disable D3cold for device
3161 * @dev: PCI device to handle
3162 *
3163 * This function can be used in drivers to disable D3cold for the device
3164 * they handle. It also updates upstream PCI bridge PM capabilities
3165 * accordingly.
3166 */
3167 void pci_d3cold_disable(struct pci_dev *dev)
3168 {
3169 if (!dev->no_d3cold) {
3170 dev->no_d3cold = true;
3171 pci_bridge_d3_update(dev);
3172 }
3173 }
3174 EXPORT_SYMBOL_GPL(pci_d3cold_disable);
3175
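/*
 * Illustrative sketch (hypothetical call site): a driver whose device does
 * not survive D3cold can veto it while still permitting D3hot:
 *
 *	pci_d3cold_disable(pdev);
 */
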
3176 /**
3177 * pci_pm_init - Initialize PM functions of given PCI device
3178 * @dev: PCI device to handle.
3179 */
3180 void pci_pm_init(struct pci_dev *dev)
3181 {
3182 int pm;
3183 u16 status;
3184 u16 pmc;
3185
3186 pm_runtime_forbid(&dev->dev);
3187 pm_runtime_set_active(&dev->dev);
3188 pm_runtime_enable(&dev->dev);
3189 device_enable_async_suspend(&dev->dev);
3190 dev->wakeup_prepared = false;
3191
3192 dev->pm_cap = 0;
3193 dev->pme_support = 0;
3194
3195 /* find PCI PM capability in list */
3196 pm = pci_find_capability(dev, PCI_CAP_ID_PM);
3197 if (!pm)
3198 return;
3199 /* Check device's ability to generate PME# */
3200 pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);
3201
3202 if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
3203 pci_err(dev, "unsupported PM cap regs version (%u)\n",
3204 pmc & PCI_PM_CAP_VER_MASK);
3205 return;
3206 }
3207
3208 dev->pm_cap = pm;
3209 dev->d3hot_delay = PCI_PM_D3HOT_WAIT;
3210 dev->d3cold_delay = PCI_PM_D3COLD_WAIT;
3211 dev->bridge_d3 = pci_bridge_d3_possible(dev);
3212 dev->d3cold_allowed = true;
3213
3214 dev->d1_support = false;
3215 dev->d2_support = false;
3216 if (!pci_no_d1d2(dev)) {
3217 if (pmc & PCI_PM_CAP_D1)
3218 dev->d1_support = true;
3219 if (pmc & PCI_PM_CAP_D2)
3220 dev->d2_support = true;
3221
3222 if (dev->d1_support || dev->d2_support)
3223 pci_info(dev, "supports%s%s\n",
3224 dev->d1_support ? " D1" : "",
3225 dev->d2_support ? " D2" : "");
3226 }
3227
3228 pmc &= PCI_PM_CAP_PME_MASK;
3229 if (pmc) {
3230 pci_info(dev, "PME# supported from%s%s%s%s%s\n",
3231 (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
3232 (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
3233 (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
3234 (pmc & PCI_PM_CAP_PME_D3hot) ? " D3hot" : "",
3235 (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : "");
3236 dev->pme_support = FIELD_GET(PCI_PM_CAP_PME_MASK, pmc);
3237 dev->pme_poll = true;
3238 /*
3239 * Make device's PM flags reflect the wake-up capability, but
3240 * let the user space enable it to wake up the system as needed.
3241 */
3242 device_set_wakeup_capable(&dev->dev, true);
3243 /* Disable the PME# generation functionality */
3244 pci_pme_active(dev, false);
3245 }
3246
3247 pci_read_config_word(dev, PCI_STATUS, &status);
3248 if (status & PCI_STATUS_IMM_READY)
3249 dev->imm_ready = 1;
3250 }
3251
3252 static unsigned long pci_ea_flags(struct pci_dev *dev, u8 prop)
3253 {
3254 unsigned long flags = IORESOURCE_PCI_FIXED | IORESOURCE_PCI_EA_BEI;
3255
3256 switch (prop) {
3257 case PCI_EA_P_MEM:
3258 case PCI_EA_P_VF_MEM:
3259 flags |= IORESOURCE_MEM;
3260 break;
3261 case PCI_EA_P_MEM_PREFETCH:
3262 case PCI_EA_P_VF_MEM_PREFETCH:
3263 flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH;
3264 break;
3265 case PCI_EA_P_IO:
3266 flags |= IORESOURCE_IO;
3267 break;
3268 default:
3269 return 0;
3270 }
3271
3272 return flags;
3273 }
3274
3275 static struct resource *pci_ea_get_resource(struct pci_dev *dev, u8 bei,
3276 u8 prop)
3277 {
3278 if (bei <= PCI_EA_BEI_BAR5 && prop <= PCI_EA_P_IO)
3279 return &dev->resource[bei];
3280 #ifdef CONFIG_PCI_IOV
3281 else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5 &&
3282 (prop == PCI_EA_P_VF_MEM || prop == PCI_EA_P_VF_MEM_PREFETCH))
3283 return &dev->resource[PCI_IOV_RESOURCES +
3284 bei - PCI_EA_BEI_VF_BAR0];
3285 #endif
3286 else if (bei == PCI_EA_BEI_ROM)
3287 return &dev->resource[PCI_ROM_RESOURCE];
3288 else
3289 return NULL;
3290 }
3291
3292 /* Read an Enhanced Allocation (EA) entry */
3293 static int pci_ea_read(struct pci_dev *dev, int offset)
3294 {
3295 struct resource *res;
3296 const char *res_name;
3297 int ent_size, ent_offset = offset;
3298 resource_size_t start, end;
3299 unsigned long flags;
3300 u32 dw0, bei, base, max_offset;
3301 u8 prop;
3302 bool support_64 = (sizeof(resource_size_t) >= 8);
3303
3304 pci_read_config_dword(dev, ent_offset, &dw0);
3305 ent_offset += 4;
3306
3307 /* Entry size field indicates DWORDs after 1st */
3308 ent_size = (FIELD_GET(PCI_EA_ES, dw0) + 1) << 2;
3309
3310 if (!(dw0 & PCI_EA_ENABLE)) /* Entry not enabled */
3311 goto out;
3312
3313 bei = FIELD_GET(PCI_EA_BEI, dw0);
3314 prop = FIELD_GET(PCI_EA_PP, dw0);
3315
3316 /*
3317 * If the Property is in the reserved range, try the Secondary
3318 * Property instead.
3319 */
3320 if (prop > PCI_EA_P_BRIDGE_IO && prop < PCI_EA_P_MEM_RESERVED)
3321 prop = FIELD_GET(PCI_EA_SP, dw0);
3322 if (prop > PCI_EA_P_BRIDGE_IO)
3323 goto out;
3324
3325 res = pci_ea_get_resource(dev, bei, prop);
3326 res_name = pci_resource_name(dev, bei);
3327 if (!res) {
3328 pci_err(dev, "Unsupported EA entry BEI: %u\n", bei);
3329 goto out;
3330 }
3331
3332 flags = pci_ea_flags(dev, prop);
3333 if (!flags) {
3334 pci_err(dev, "Unsupported EA properties: %#x\n", prop);
3335 goto out;
3336 }
3337
3338 /* Read Base */
3339 pci_read_config_dword(dev, ent_offset, &base);
3340 start = (base & PCI_EA_FIELD_MASK);
3341 ent_offset += 4;
3342
3343 /* Read MaxOffset */
3344 pci_read_config_dword(dev, ent_offset, &max_offset);
3345 ent_offset += 4;
3346
3347 /* Read Base MSBs (if 64-bit entry) */
3348 if (base & PCI_EA_IS_64) {
3349 u32 base_upper;
3350
3351 pci_read_config_dword(dev, ent_offset, &base_upper);
3352 ent_offset += 4;
3353
3354 flags |= IORESOURCE_MEM_64;
3355
3356 /* entry starts above 32-bit boundary, can't use */
3357 if (!support_64 && base_upper)
3358 goto out;
3359
3360 if (support_64)
3361 start |= ((u64)base_upper << 32);
3362 }
3363
3364 end = start + (max_offset | 0x03);
3365
3366 /* Read MaxOffset MSBs (if 64-bit entry) */
3367 if (max_offset & PCI_EA_IS_64) {
3368 u32 max_offset_upper;
3369
3370 pci_read_config_dword(dev, ent_offset, &max_offset_upper);
3371 ent_offset += 4;
3372
3373 flags |= IORESOURCE_MEM_64;
3374
3375 /* entry too big, can't use */
3376 if (!support_64 && max_offset_upper)
3377 goto out;
3378
3379 if (support_64)
3380 end += ((u64)max_offset_upper << 32);
3381 }
3382
3383 if (end < start) {
3384 pci_err(dev, "EA Entry crosses address boundary\n");
3385 goto out;
3386 }
3387
3388 if (ent_size != ent_offset - offset) {
3389 pci_err(dev, "EA Entry Size (%d) does not match length read (%d)\n",
3390 ent_size, ent_offset - offset);
3391 goto out;
3392 }
3393
3394 res->name = pci_name(dev);
3395 res->start = start;
3396 res->end = end;
3397 res->flags = flags;
3398
3399 if (bei <= PCI_EA_BEI_BAR5)
3400 pci_info(dev, "%s %pR: from Enhanced Allocation, properties %#02x\n",
3401 res_name, res, prop);
3402 else if (bei == PCI_EA_BEI_ROM)
3403 pci_info(dev, "%s %pR: from Enhanced Allocation, properties %#02x\n",
3404 res_name, res, prop);
3405 else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5)
3406 pci_info(dev, "%s %pR: from Enhanced Allocation, properties %#02x\n",
3407 res_name, res, prop);
3408 else
3409 pci_info(dev, "BEI %d %pR: from Enhanced Allocation, properties %#02x\n",
3410 bei, res, prop);
3411
3412 out:
3413 return offset + ent_size;
3414 }
3415
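/*
 * Worked example for pci_ea_read() (hypothetical register values): a
 * 32-bit entry with Base = 0xE0000000 and MaxOffset = 0x000FFFFC yields
 * start = 0xE0000000 and end = start + (0x000FFFFC | 0x03) = 0xE00FFFFF,
 * i.e. a 1 MB region.
 */
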
3416 /* Enhanced Allocation Initialization */
3417 void pci_ea_init(struct pci_dev *dev)
3418 {
3419 int ea;
3420 u8 num_ent;
3421 int offset;
3422 int i;
3423
3424 /* find PCI EA capability in list */
3425 ea = pci_find_capability(dev, PCI_CAP_ID_EA);
3426 if (!ea)
3427 return;
3428
3429 /* determine the number of entries */
3430 pci_bus_read_config_byte(dev->bus, dev->devfn, ea + PCI_EA_NUM_ENT,
3431 &num_ent);
3432 num_ent &= PCI_EA_NUM_ENT_MASK;
3433
3434 offset = ea + PCI_EA_FIRST_ENT;
3435
3436 /* Skip DWORD 2 for type 1 functions */
3437 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
3438 offset += 4;
3439
3440 /* parse each EA entry */
3441 for (i = 0; i < num_ent; ++i)
3442 offset = pci_ea_read(dev, offset);
3443 }
3444
3445 static void pci_add_saved_cap(struct pci_dev *pci_dev,
3446 struct pci_cap_saved_state *new_cap)
3447 {
3448 hlist_add_head(&new_cap->next, &pci_dev->saved_cap_space);
3449 }
3450
3451 /**
3452 * _pci_add_cap_save_buffer - allocate buffer for saving given
3453 * capability registers
3454 * @dev: the PCI device
3455 * @cap: the capability to allocate the buffer for
3456 * @extended: Standard or Extended capability ID
3457 * @size: requested size of the buffer
3458 */
3459 static int _pci_add_cap_save_buffer(struct pci_dev *dev, u16 cap,
3460 bool extended, unsigned int size)
3461 {
3462 int pos;
3463 struct pci_cap_saved_state *save_state;
3464
3465 if (extended)
3466 pos = pci_find_ext_capability(dev, cap);
3467 else
3468 pos = pci_find_capability(dev, cap);
3469
3470 if (!pos)
3471 return 0;
3472
3473 save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL);
3474 if (!save_state)
3475 return -ENOMEM;
3476
3477 save_state->cap.cap_nr = cap;
3478 save_state->cap.cap_extended = extended;
3479 save_state->cap.size = size;
3480 pci_add_saved_cap(dev, save_state);
3481
3482 return 0;
3483 }
3484
3485 int pci_add_cap_save_buffer(struct pci_dev *dev, char cap, unsigned int size)
3486 {
3487 return _pci_add_cap_save_buffer(dev, cap, false, size);
3488 }
3489
3490 int pci_add_ext_cap_save_buffer(struct pci_dev *dev, u16 cap, unsigned int size)
3491 {
3492 return _pci_add_cap_save_buffer(dev, cap, true, size);
3493 }
3494
3495 /**
3496 * pci_allocate_cap_save_buffers - allocate buffers for saving capabilities
3497 * @dev: the PCI device
3498 */
3499 void pci_allocate_cap_save_buffers(struct pci_dev *dev)
3500 {
3501 int error;
3502
3503 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP,
3504 PCI_EXP_SAVE_REGS * sizeof(u16));
3505 if (error)
3506 pci_err(dev, "unable to preallocate PCI Express save buffer\n");
3507
3508 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16));
3509 if (error)
3510 pci_err(dev, "unable to preallocate PCI-X save buffer\n");
3511
3512 error = pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_LTR,
3513 2 * sizeof(u16));
3514 if (error)
3515 pci_err(dev, "unable to allocate suspend buffer for LTR\n");
3516
3517 pci_allocate_vc_save_buffers(dev);
3518 }
3519
3520 void pci_free_cap_save_buffers(struct pci_dev *dev)
3521 {
3522 struct pci_cap_saved_state *tmp;
3523 struct hlist_node *n;
3524
3525 hlist_for_each_entry_safe(tmp, n, &dev->saved_cap_space, next)
3526 kfree(tmp);
3527 }
3528
3529 /**
3530 * pci_configure_ari - enable or disable ARI forwarding
3531 * @dev: the PCI device
3532 *
3533 * If @dev and its upstream bridge both support ARI, enable ARI in the
3534 * bridge. Otherwise, disable ARI in the bridge.
3535 */
3536 void pci_configure_ari(struct pci_dev *dev)
3537 {
3538 u32 cap;
3539 struct pci_dev *bridge;
3540
3541 if (pcie_ari_disabled || !pci_is_pcie(dev) || dev->devfn)
3542 return;
3543
3544 bridge = dev->bus->self;
3545 if (!bridge)
3546 return;
3547
3548 pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);
3549 if (!(cap & PCI_EXP_DEVCAP2_ARI))
3550 return;
3551
3552 if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI)) {
3553 pcie_capability_set_word(bridge, PCI_EXP_DEVCTL2,
3554 PCI_EXP_DEVCTL2_ARI);
3555 bridge->ari_enabled = 1;
3556 } else {
3557 pcie_capability_clear_word(bridge, PCI_EXP_DEVCTL2,
3558 PCI_EXP_DEVCTL2_ARI);
3559 bridge->ari_enabled = 0;
3560 }
3561 }
3562
3563 static bool pci_acs_flags_enabled(struct pci_dev *pdev, u16 acs_flags)
3564 {
3565 int pos;
3566 u16 cap, ctrl;
3567
3568 pos = pdev->acs_cap;
3569 if (!pos)
3570 return false;
3571
3572 /*
3573 * Except for egress control, capabilities are either required
3574 * or only required if controllable. Features missing from the
3575 * capability field can therefore be assumed as hard-wired enabled.
3576 */
3577 pci_read_config_word(pdev, pos + PCI_ACS_CAP, &cap);
3578 acs_flags &= (cap | PCI_ACS_EC);
3579
3580 pci_read_config_word(pdev, pos + PCI_ACS_CTRL, &ctrl);
3581 return (ctrl & acs_flags) == acs_flags;
3582 }
3583
3584 /**
3585 * pci_acs_enabled - test ACS against required flags for a given device
3586 * @pdev: device to test
3587 * @acs_flags: required PCI ACS flags
3588 *
3589 * Return true if the device supports the provided flags. Automatically
3590 * filters out flags that are not implemented on multifunction devices.
3591 *
3592 * Note that this interface checks the effective ACS capabilities of the
3593 * device rather than the actual capabilities. For instance, most single
3594 * function endpoints are not required to support ACS because they have no
3595 * opportunity for peer-to-peer access. We therefore return 'true'
3596 * regardless of whether the device exposes an ACS capability. This makes
3597 * it much easier for callers of this function to ignore the actual type
3598 * or topology of the device when testing ACS support.
3599 */
3600 bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags)
3601 {
3602 int ret;
3603
3604 ret = pci_dev_specific_acs_enabled(pdev, acs_flags);
3605 if (ret >= 0)
3606 return ret > 0;
3607
3608 /*
3609 * Conventional PCI and PCI-X devices never support ACS, either
3610 * effectively or actually. The shared bus topology implies that
3611 * any device on the bus can receive or snoop DMA.
3612 */
3613 if (!pci_is_pcie(pdev))
3614 return false;
3615
3616 switch (pci_pcie_type(pdev)) {
3617 /*
3618 * PCI/X-to-PCIe bridges are not specifically mentioned by the spec,
3619 * but since their primary interface is PCI/X, we conservatively
3620 * handle them as we would a non-PCIe device.
3621 */
3622 case PCI_EXP_TYPE_PCIE_BRIDGE:
3623 /*
3624 * PCIe 3.0, 6.12.1 excludes ACS on these devices. "ACS is never
3625 * applicable... must never implement an ACS Extended Capability...".
3626 * This seems arbitrary, but we take a conservative interpretation
3627 * of this statement.
3628 */
3629 case PCI_EXP_TYPE_PCI_BRIDGE:
3630 case PCI_EXP_TYPE_RC_EC:
3631 return false;
3632 /*
3633 * PCIe 3.0, 6.12.1.1 specifies that downstream and root ports should
3634 * implement ACS in order to indicate their peer-to-peer capabilities,
3635 * regardless of whether they are single- or multi-function devices.
3636 */
3637 case PCI_EXP_TYPE_DOWNSTREAM:
3638 case PCI_EXP_TYPE_ROOT_PORT:
3639 return pci_acs_flags_enabled(pdev, acs_flags);
3640 /*
3641 * PCIe 3.0, 6.12.1.2 specifies ACS capabilities that should be
3642 * implemented by the remaining PCIe types to indicate peer-to-peer
3643 * capabilities, but only when they are part of a multifunction
3644 * device. The footnote for section 6.12 indicates the specific
3645 * PCIe types included here.
3646 */
3647 case PCI_EXP_TYPE_ENDPOINT:
3648 case PCI_EXP_TYPE_UPSTREAM:
3649 case PCI_EXP_TYPE_LEG_END:
3650 case PCI_EXP_TYPE_RC_END:
3651 if (!pdev->multifunction)
3652 break;
3653
3654 return pci_acs_flags_enabled(pdev, acs_flags);
3655 }
3656
3657 /*
3658 * PCIe 3.0, 6.12.1.3 specifies no ACS capabilities are applicable
3659 * to single function devices with the exception of downstream ports.
3660 */
3661 return true;
3662 }
3663
3664 /**
3665 * pci_acs_path_enabled - test ACS flags from start to end in a hierarchy
3666 * @start: starting downstream device
3667 * @end: ending upstream device or NULL to search to the root bus
3668 * @acs_flags: required flags
3669 *
3670 * Walk up a device tree from start to end testing PCI ACS support. If
3671 * any step along the way does not support the required flags, return false.
3672 */
3673 bool pci_acs_path_enabled(struct pci_dev *start,
3674 struct pci_dev *end, u16 acs_flags)
3675 {
3676 struct pci_dev *pdev, *parent = start;
3677
3678 do {
3679 pdev = parent;
3680
3681 if (!pci_acs_enabled(pdev, acs_flags))
3682 return false;
3683
3684 if (pci_is_root_bus(pdev->bus))
3685 return (end == NULL);
3686
3687 parent = pdev->bus->self;
3688 } while (pdev != end);
3689
3690 return true;
3691 }
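
/*
 * Example (illustrative only): IOMMU/VFIO code uses this to decide
 * whether peer-to-peer TLPs from @pdev can reach other devices without
 * translation. A hypothetical isolation check might be:
 *
 *	u16 flags = PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF;
 *
 *	if (!pci_acs_path_enabled(pdev, NULL, flags))
 *		pci_info(pdev, "ACS not enabled up to the root bus\n");
 */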
3692
3693 /**
3694 * pci_acs_init - Initialize ACS if hardware supports it
3695 * @dev: the PCI device
3696 */
3697 void pci_acs_init(struct pci_dev *dev)
3698 {
3699 dev->acs_cap = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
3700
3701 /*
3702 * Attempt to enable ACS regardless of capability because some Root
3703 * Ports (e.g. those quirked with *_intel_pch_acs_*) do not have
3704 * the standard ACS capability but still support ACS via those
3705 * quirks.
3706 */
3707 pci_enable_acs(dev);
3708 }
3709
3710 /**
3711 * pci_rebar_find_pos - find position of resize ctrl reg for BAR
3712 * @pdev: PCI device
3713 * @bar: BAR to find
3714 *
3715 * Helper to find the position of the ctrl register for a BAR.
3716 * Returns -ENOTSUPP if resizable BARs are not supported at all.
3717 * Returns -ENOENT if no ctrl register for the BAR could be found.
3718 */
3719 static int pci_rebar_find_pos(struct pci_dev *pdev, int bar)
3720 {
3721 unsigned int pos, nbars, i;
3722 u32 ctrl;
3723
3724 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_REBAR);
3725 if (!pos)
3726 return -ENOTSUPP;
3727
3728 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3729 nbars = FIELD_GET(PCI_REBAR_CTRL_NBAR_MASK, ctrl);
3730
3731 for (i = 0; i < nbars; i++, pos += 8) {
3732 int bar_idx;
3733
3734 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3735 bar_idx = FIELD_GET(PCI_REBAR_CTRL_BAR_IDX, ctrl);
3736 if (bar_idx == bar)
3737 return pos;
3738 }
3739
3740 return -ENOENT;
3741 }
3742
3743 /**
3744 * pci_rebar_get_possible_sizes - get possible sizes for BAR
3745 * @pdev: PCI device
3746 * @bar: BAR to query
3747 *
3748 * Get the possible sizes of a resizable BAR as bitmask defined in the spec
3749 * (bit 0=1MB, bit 19=512GB). Returns 0 if BAR isn't resizable.
3750 */
3751 u32 pci_rebar_get_possible_sizes(struct pci_dev *pdev, int bar)
3752 {
3753 int pos;
3754 u32 cap;
3755
3756 pos = pci_rebar_find_pos(pdev, bar);
3757 if (pos < 0)
3758 return 0;
3759
3760 pci_read_config_dword(pdev, pos + PCI_REBAR_CAP, &cap);
3761 cap = FIELD_GET(PCI_REBAR_CAP_SIZES, cap);
3762
3763 /* Sapphire RX 5600 XT Pulse has an invalid cap dword for BAR 0 */
3764 if (pdev->vendor == PCI_VENDOR_ID_ATI && pdev->device == 0x731f &&
3765 bar == 0 && cap == 0x700)
3766 return 0x3f00;
3767
3768 return cap;
3769 }
3770 EXPORT_SYMBOL(pci_rebar_get_possible_sizes);
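
/*
 * Example (illustrative only): bit n of the returned mask corresponds to
 * a size of 2^(n + 20) bytes, so a caller could enumerate the supported
 * sizes like this:
 *
 *	u32 sizes = pci_rebar_get_possible_sizes(pdev, bar);
 *	int bit;
 *
 *	for (bit = 0; bit < 20; bit++)
 *		if (sizes & BIT(bit))
 *			pci_info(pdev, "BAR %d supports %llu bytes\n",
 *				 bar, 1ULL << (bit + 20));
 */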
3771
3772 /**
3773 * pci_rebar_get_current_size - get the current size of a BAR
3774 * @pdev: PCI device
3775 * @bar: BAR to query
3776 *
3777 * Read the size of a BAR from the resizable BAR config.
3778 * Returns size if found or negative error code.
3779 */
3780 int pci_rebar_get_current_size(struct pci_dev *pdev, int bar)
3781 {
3782 int pos;
3783 u32 ctrl;
3784
3785 pos = pci_rebar_find_pos(pdev, bar);
3786 if (pos < 0)
3787 return pos;
3788
3789 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3790 return FIELD_GET(PCI_REBAR_CTRL_BAR_SIZE, ctrl);
3791 }
3792
3793 /**
3794 * pci_rebar_set_size - set a new size for a BAR
3795 * @pdev: PCI device
3796 * @bar: BAR to set size to
3797 * @size: new size as defined in the spec (0=1MB, 19=512GB)
3798 *
3799 * Set the new size of a BAR as defined in the spec.
3800 * Returns zero if resizing was successful, error code otherwise.
3801 */
3802 int pci_rebar_set_size(struct pci_dev *pdev, int bar, int size)
3803 {
3804 int pos;
3805 u32 ctrl;
3806
3807 pos = pci_rebar_find_pos(pdev, bar);
3808 if (pos < 0)
3809 return pos;
3810
3811 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3812 ctrl &= ~PCI_REBAR_CTRL_BAR_SIZE;
3813 ctrl |= FIELD_PREP(PCI_REBAR_CTRL_BAR_SIZE, size);
3814 pci_write_config_dword(pdev, pos + PCI_REBAR_CTRL, ctrl);
3815 return 0;
3816 }
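
/*
 * Example (illustrative only): drivers normally resize through
 * pci_resize_resource(), which calls this function internally; a sketch
 * of resizing BAR 0 to 256 MB (size encoding 8 = 2^(8+20) bytes):
 *
 *	if (pci_rebar_get_possible_sizes(pdev, 0) & BIT(8)) {
 *		pci_release_resource(pdev, 0);
 *		err = pci_resize_resource(pdev, 0, 8);
 *	}
 */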
3817
3818 /**
3819 * pci_enable_atomic_ops_to_root - enable AtomicOp requests to root port
3820 * @dev: the PCI device
3821 * @cap_mask: mask of desired AtomicOp sizes, including one or more of:
3822 * PCI_EXP_DEVCAP2_ATOMIC_COMP32
3823 * PCI_EXP_DEVCAP2_ATOMIC_COMP64
3824 * PCI_EXP_DEVCAP2_ATOMIC_COMP128
3825 *
3826 * Return 0 if all upstream bridges support AtomicOp routing, egress
3827 * blocking is disabled on all upstream ports, and the root port supports
3828 * the requested completion capabilities (32-bit, 64-bit and/or 128-bit
3829 * AtomicOp completion), or negative otherwise.
3830 */
3831 int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask)
3832 {
3833 struct pci_bus *bus = dev->bus;
3834 struct pci_dev *bridge;
3835 u32 cap, ctl2;
3836
3837 /*
3838 * Per PCIe r5.0, sec 9.3.5.10, the AtomicOp Requester Enable bit
3839 * in Device Control 2 is reserved in VFs and the PF value applies
3840 * to all associated VFs.
3841 */
3842 if (dev->is_virtfn)
3843 return -EINVAL;
3844
3845 if (!pci_is_pcie(dev))
3846 return -EINVAL;
3847
3848 /*
3849 * Per PCIe r4.0, sec 6.15, endpoints and root ports may be
3850 * AtomicOp requesters. For now, we only support endpoints as
3851 * requesters and root ports as completers. No endpoints as
3852 * completers, and no peer-to-peer.
3853 */
3854
3855 switch (pci_pcie_type(dev)) {
3856 case PCI_EXP_TYPE_ENDPOINT:
3857 case PCI_EXP_TYPE_LEG_END:
3858 case PCI_EXP_TYPE_RC_END:
3859 break;
3860 default:
3861 return -EINVAL;
3862 }
3863
3864 while (bus->parent) {
3865 bridge = bus->self;
3866
3867 pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);
3868
3869 switch (pci_pcie_type(bridge)) {
3870 /* Ensure switch ports support AtomicOp routing */
3871 case PCI_EXP_TYPE_UPSTREAM:
3872 case PCI_EXP_TYPE_DOWNSTREAM:
3873 if (!(cap & PCI_EXP_DEVCAP2_ATOMIC_ROUTE))
3874 return -EINVAL;
3875 break;
3876
3877 /* Ensure root port supports all the sizes we care about */
3878 case PCI_EXP_TYPE_ROOT_PORT:
3879 if ((cap & cap_mask) != cap_mask)
3880 return -EINVAL;
3881 break;
3882 }
3883
3884 /* Ensure upstream ports don't block AtomicOps on egress */
3885 if (pci_pcie_type(bridge) == PCI_EXP_TYPE_UPSTREAM) {
3886 pcie_capability_read_dword(bridge, PCI_EXP_DEVCTL2,
3887 &ctl2);
3888 if (ctl2 & PCI_EXP_DEVCTL2_ATOMIC_EGRESS_BLOCK)
3889 return -EINVAL;
3890 }
3891
3892 bus = bus->parent;
3893 }
3894
3895 pcie_capability_set_word(dev, PCI_EXP_DEVCTL2,
3896 PCI_EXP_DEVCTL2_ATOMIC_REQ);
3897 return 0;
3898 }
3899 EXPORT_SYMBOL(pci_enable_atomic_ops_to_root);
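
/*
 * Example (illustrative only): an endpoint driver that performs 64-bit
 * AtomicOps to host memory would typically call this from probe and fall
 * back to non-atomic operation on failure:
 *
 *	if (pci_enable_atomic_ops_to_root(pdev,
 *					  PCI_EXP_DEVCAP2_ATOMIC_COMP64))
 *		pci_warn(pdev, "PCIe AtomicOps not supported\n");
 */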
3900
3901 /**
3902 * pci_release_region - Release a PCI bar
3903 * @pdev: PCI device whose resources were previously reserved by
3904 * pci_request_region()
3905 * @bar: BAR to release
3906 *
3907 * Releases the PCI I/O and memory resources previously reserved by a
3908 * successful call to pci_request_region(). Call this function only
3909 * after all use of the PCI regions has ceased.
3910 */
3911 void pci_release_region(struct pci_dev *pdev, int bar)
3912 {
3913 /*
3914 * This is done for backwards compatibility, because the old PCI devres
3915 * API had a mode in which the function became managed if it had been
3916 * enabled with pcim_enable_device() instead of pci_enable_device().
3917 */
3918 if (pci_is_managed(pdev)) {
3919 pcim_release_region(pdev, bar);
3920 return;
3921 }
3922
3923 if (pci_resource_len(pdev, bar) == 0)
3924 return;
3925 if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
3926 release_region(pci_resource_start(pdev, bar),
3927 pci_resource_len(pdev, bar));
3928 else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
3929 release_mem_region(pci_resource_start(pdev, bar),
3930 pci_resource_len(pdev, bar));
3931 }
3932 EXPORT_SYMBOL(pci_release_region);
3933
3934 /**
3935 * __pci_request_region - Reserve PCI I/O and memory resource
3936 * @pdev: PCI device whose resources are to be reserved
3937 * @bar: BAR to be reserved
3938 * @res_name: Name to be associated with resource.
3939 * @exclusive: whether the region access is exclusive or not
3940 *
3941 * Returns: 0 on success, negative error code on failure.
3942 *
3943 * Mark the PCI region associated with PCI device @pdev BAR @bar as
3944 * being reserved by owner @res_name. Do not access any
3945 * address inside the PCI regions unless this call returns
3946 * successfully.
3947 *
3948 * If @exclusive is set, then the region is marked so that userspace
3949 * is explicitly not allowed to map the resource via /dev/mem or
3950 * sysfs MMIO access.
3951 *
3952 * Returns 0 on success, or %EBUSY on error. A warning
3953 * message is also printed on failure.
3954 */
3955 static int __pci_request_region(struct pci_dev *pdev, int bar,
3956 const char *res_name, int exclusive)
3957 {
3958 if (pci_is_managed(pdev)) {
3959 if (exclusive == IORESOURCE_EXCLUSIVE)
3960 return pcim_request_region_exclusive(pdev, bar, res_name);
3961
3962 return pcim_request_region(pdev, bar, res_name);
3963 }
3964
3965 if (pci_resource_len(pdev, bar) == 0)
3966 return 0;
3967
3968 if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
3969 if (!request_region(pci_resource_start(pdev, bar),
3970 pci_resource_len(pdev, bar), res_name))
3971 goto err_out;
3972 } else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
3973 if (!__request_mem_region(pci_resource_start(pdev, bar),
3974 pci_resource_len(pdev, bar), res_name,
3975 exclusive))
3976 goto err_out;
3977 }
3978
3979 return 0;
3980
3981 err_out:
3982 pci_warn(pdev, "BAR %d: can't reserve %pR\n", bar,
3983 &pdev->resource[bar]);
3984 return -EBUSY;
3985 }
3986
3987 /**
3988 * pci_request_region - Reserve PCI I/O and memory resource
3989 * @pdev: PCI device whose resources are to be reserved
3990 * @bar: BAR to be reserved
3991 * @res_name: Name to be associated with resource
3992 *
3993 * Returns: 0 on success, negative error code on failure.
3994 *
3995 * Mark the PCI region associated with PCI device @pdev BAR @bar as
3996 * being reserved by owner @res_name. Do not access any
3997 * address inside the PCI regions unless this call returns
3998 * successfully.
3999 *
4000 * Returns 0 on success, or %EBUSY on error. A warning
4001 * message is also printed on failure.
4002 *
4003 * NOTE:
4004 * This is a "hybrid" function: It's normally unmanaged, but becomes managed
4005 * when pcim_enable_device() has been called in advance. This hybrid feature is
4006 * DEPRECATED! If you want managed cleanup, use the pcim_* functions instead.
4007 */
4008 int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
4009 {
4010 return __pci_request_region(pdev, bar, res_name, 0);
4011 }
4012 EXPORT_SYMBOL(pci_request_region);
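
/*
 * Example (illustrative only): the unmanaged request must be paired with
 * a release in the error and remove paths ("my_driver" and the BAR
 * number are hypothetical):
 *
 *	err = pci_request_region(pdev, 0, "my_driver");
 *	if (err)
 *		return err;
 *	...
 *	pci_release_region(pdev, 0);
 */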
4013
4014 /**
4015 * pci_release_selected_regions - Release selected PCI I/O and memory resources
4016 * @pdev: PCI device whose resources were previously reserved
4017 * @bars: Bitmask of BARs to be released
4018 *
4019 * Release selected PCI I/O and memory resources previously reserved.
4020 * Call this function only after all use of the PCI regions has ceased.
4021 */
4022 void pci_release_selected_regions(struct pci_dev *pdev, int bars)
4023 {
4024 int i;
4025
4026 for (i = 0; i < PCI_STD_NUM_BARS; i++)
4027 if (bars & (1 << i))
4028 pci_release_region(pdev, i);
4029 }
4030 EXPORT_SYMBOL(pci_release_selected_regions);
4031
4032 static int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
4033 const char *res_name, int excl)
4034 {
4035 int i;
4036
4037 for (i = 0; i < PCI_STD_NUM_BARS; i++)
4038 if (bars & (1 << i))
4039 if (__pci_request_region(pdev, i, res_name, excl))
4040 goto err_out;
4041 return 0;
4042
4043 err_out:
4044 while (--i >= 0)
4045 if (bars & (1 << i))
4046 pci_release_region(pdev, i);
4047
4048 return -EBUSY;
4049 }
4050
4051
4052 /**
4053 * pci_request_selected_regions - Reserve selected PCI I/O and memory resources
4054 * @pdev: PCI device whose resources are to be reserved
4055 * @bars: Bitmask of BARs to be requested
4056 * @res_name: Name to be associated with resource
4057 *
4058 * Returns: 0 on success, negative error code on failure.
4059 *
4060 * NOTE:
4061 * This is a "hybrid" function: It's normally unmanaged, but becomes managed
4062 * when pcim_enable_device() has been called in advance. This hybrid feature is
4063 * DEPRECATED! If you want managed cleanup, use the pcim_* functions instead.
4064 */
4065 int pci_request_selected_regions(struct pci_dev *pdev, int bars,
4066 const char *res_name)
4067 {
4068 return __pci_request_selected_regions(pdev, bars, res_name, 0);
4069 }
4070 EXPORT_SYMBOL(pci_request_selected_regions);
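
/*
 * Example (illustrative only): pci_select_bars() builds a suitable @bars
 * mask, so a driver that only touches memory BARs can request just
 * those ("my_driver" is hypothetical):
 *
 *	int bars = pci_select_bars(pdev, IORESOURCE_MEM);
 *
 *	err = pci_request_selected_regions(pdev, bars, "my_driver");
 */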
4071
4072 /**
4073 * pci_request_selected_regions_exclusive - Request regions exclusively
4074 * @pdev: PCI device to request regions from
4075 * @bars: bit mask of BARs to request
4076 * @res_name: name to be associated with the requests
4077 *
4078 * Returns: 0 on success, negative error code on failure.
4079 *
4080 * NOTE:
4081 * This is a "hybrid" function: It's normally unmanaged, but becomes managed
4082 * when pcim_enable_device() has been called in advance. This hybrid feature is
4083 * DEPRECATED! If you want managed cleanup, use the pcim_* functions instead.
4084 */
4085 int pci_request_selected_regions_exclusive(struct pci_dev *pdev, int bars,
4086 const char *res_name)
4087 {
4088 return __pci_request_selected_regions(pdev, bars, res_name,
4089 IORESOURCE_EXCLUSIVE);
4090 }
4091 EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
4092
4093 /**
4094 * pci_release_regions - Release reserved PCI I/O and memory resources
4095 * @pdev: PCI device whose resources were previously reserved by
4096 * pci_request_regions()
4097 *
4098 * Releases all PCI I/O and memory resources previously reserved by a
4099 * successful call to pci_request_regions(). Call this function only
4100 * after all use of the PCI regions has ceased.
4101 */
4102 void pci_release_regions(struct pci_dev *pdev)
4103 {
4104 pci_release_selected_regions(pdev, (1 << PCI_STD_NUM_BARS) - 1);
4105 }
4106 EXPORT_SYMBOL(pci_release_regions);
4107
4108 /**
4109 * pci_request_regions - Reserve PCI I/O and memory resources
4110 * @pdev: PCI device whose resources are to be reserved
4111 * @res_name: Name to be associated with resource.
4112 *
4113 * Mark all PCI regions associated with PCI device @pdev as
4114 * being reserved by owner @res_name. Do not access any
4115 * address inside the PCI regions unless this call returns
4116 * successfully.
4117 *
4118 * Returns 0 on success, or %EBUSY on error. A warning
4119 * message is also printed on failure.
4120 *
4121 * NOTE:
4122 * This is a "hybrid" function: It's normally unmanaged, but becomes managed
4123 * when pcim_enable_device() has been called in advance. This hybrid feature is
4124 * DEPRECATED! If you want managed cleanup, use the pcim_* functions instead.
4125 */
4126 int pci_request_regions(struct pci_dev *pdev, const char *res_name)
4127 {
4128 return pci_request_selected_regions(pdev,
4129 ((1 << PCI_STD_NUM_BARS) - 1), res_name);
4130 }
4131 EXPORT_SYMBOL(pci_request_regions);
4132
4133 /**
4134 * pci_request_regions_exclusive - Reserve PCI I/O and memory resources
4135 * @pdev: PCI device whose resources are to be reserved
4136 * @res_name: Name to be associated with resource.
4137 *
4138 * Returns: 0 on success, negative error code on failure.
4139 *
4140 * Mark all PCI regions associated with PCI device @pdev as being reserved
4141 * by owner @res_name. Do not access any address inside the PCI regions
4142 * unless this call returns successfully.
4143 *
4144 * pci_request_regions_exclusive() will mark the region so that /dev/mem
4145 * and the sysfs MMIO access will not be allowed.
4146 *
4147 * Returns 0 on success, or %EBUSY on error. A warning message is also
4148 * printed on failure.
4149 *
4150 * NOTE:
4151 * This is a "hybrid" function: It's normally unmanaged, but becomes managed
4152 * when pcim_enable_device() has been called in advance. This hybrid feature is
4153 * DEPRECATED! If you want managed cleanup, use the pcim_* functions instead.
4154 */
4155 int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
4156 {
4157 return pci_request_selected_regions_exclusive(pdev,
4158 ((1 << PCI_STD_NUM_BARS) - 1), res_name);
4159 }
4160 EXPORT_SYMBOL(pci_request_regions_exclusive);
4161
4162 /*
4163 * Record the PCI IO range (expressed as CPU physical address + size).
4164 * Return a negative value if an error has occurred, zero otherwise
4165 */
4166 int pci_register_io_range(struct fwnode_handle *fwnode, phys_addr_t addr,
4167 resource_size_t size)
4168 {
4169 int ret = 0;
4170 #ifdef PCI_IOBASE
4171 struct logic_pio_hwaddr *range;
4172
4173 if (!size || addr + size < addr)
4174 return -EINVAL;
4175
4176 range = kzalloc(sizeof(*range), GFP_ATOMIC);
4177 if (!range)
4178 return -ENOMEM;
4179
4180 range->fwnode = fwnode;
4181 range->size = size;
4182 range->hw_start = addr;
4183 range->flags = LOGIC_PIO_CPU_MMIO;
4184
4185 ret = logic_pio_register_range(range);
4186 if (ret)
4187 kfree(range);
4188
4189 /* Ignore duplicates due to deferred probing */
4190 if (ret == -EEXIST)
4191 ret = 0;
4192 #endif
4193
4194 return ret;
4195 }
4196
4197 phys_addr_t pci_pio_to_address(unsigned long pio)
4198 {
4199 #ifdef PCI_IOBASE
4200 if (pio < MMIO_UPPER_LIMIT)
4201 return logic_pio_to_hwaddr(pio);
4202 #endif
4203
4204 return (phys_addr_t) OF_BAD_ADDR;
4205 }
4206 EXPORT_SYMBOL_GPL(pci_pio_to_address);
4207
4208 unsigned long __weak pci_address_to_pio(phys_addr_t address)
4209 {
4210 #ifdef PCI_IOBASE
4211 return logic_pio_trans_cpuaddr(address);
4212 #else
4213 if (address > IO_SPACE_LIMIT)
4214 return (unsigned long)-1;
4215
4216 return (unsigned long) address;
4217 #endif
4218 }
4219
4220 /**
4221 * pci_remap_iospace - Remap the memory mapped I/O space
4222 * @res: Resource describing the I/O space
4223 * @phys_addr: physical address of range to be mapped
4224 *
4225 * Remap the memory mapped I/O space described by the @res and the CPU
4226 * physical address @phys_addr into virtual address space. Only
4227 * architectures that have memory mapped IO functions defined (and the
4228 * PCI_IOBASE value defined) should call this function.
4229 */
4230 #ifndef pci_remap_iospace
4231 int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
4232 {
4233 #if defined(PCI_IOBASE) && defined(CONFIG_MMU)
4234 unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
4235
4236 if (!(res->flags & IORESOURCE_IO))
4237 return -EINVAL;
4238
4239 if (res->end > IO_SPACE_LIMIT)
4240 return -EINVAL;
4241
4242 return vmap_page_range(vaddr, vaddr + resource_size(res), phys_addr,
4243 pgprot_device(PAGE_KERNEL));
4244 #else
4245 /*
4246 * This architecture does not have memory mapped I/O space,
4247 * so this function should never be called
4248 */
4249 WARN_ONCE(1, "This architecture does not support memory mapped I/O\n");
4250 return -ENODEV;
4251 #endif
4252 }
4253 EXPORT_SYMBOL(pci_remap_iospace);
4254 #endif
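
/*
 * Example (illustrative only): a host bridge driver typically maps its
 * I/O window after the PIO range has been registered, e.g.:
 *
 *	phys_addr_t phys = pci_pio_to_address(res->start);
 *
 *	err = pci_remap_iospace(res, phys);
 */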
4255
4256 /**
4257 * pci_unmap_iospace - Unmap the memory mapped I/O space
4258 * @res: resource to be unmapped
4259 *
4260 * Unmap the CPU virtual address @res from virtual address space. Only
4261 * architectures that have memory mapped IO functions defined (and the
4262 * PCI_IOBASE value defined) should call this function.
4263 */
4264 void pci_unmap_iospace(struct resource *res)
4265 {
4266 #if defined(PCI_IOBASE) && defined(CONFIG_MMU)
4267 unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
4268
4269 vunmap_range(vaddr, vaddr + resource_size(res));
4270 #endif
4271 }
4272 EXPORT_SYMBOL(pci_unmap_iospace);
4273
4274 static void __pci_set_master(struct pci_dev *dev, bool enable)
4275 {
4276 u16 old_cmd, cmd;
4277
4278 pci_read_config_word(dev, PCI_COMMAND, &old_cmd);
4279 if (enable)
4280 cmd = old_cmd | PCI_COMMAND_MASTER;
4281 else
4282 cmd = old_cmd & ~PCI_COMMAND_MASTER;
4283 if (cmd != old_cmd) {
4284 pci_dbg(dev, "%s bus mastering\n",
4285 enable ? "enabling" : "disabling");
4286 pci_write_config_word(dev, PCI_COMMAND, cmd);
4287 }
4288 dev->is_busmaster = enable;
4289 }
4290
4291 /**
4292 * pcibios_setup - process "pci=" kernel boot arguments
4293 * @str: string used to pass in "pci=" kernel boot arguments
4294 *
4295 * Process kernel boot arguments. This is the default implementation.
4296 * Architecture specific implementations can override this as necessary.
4297 */
4298 char * __weak __init pcibios_setup(char *str)
4299 {
4300 return str;
4301 }
4302
4303 /**
4304 * pcibios_set_master - enable PCI bus-mastering for device dev
4305 * @dev: the PCI device to enable
4306 *
4307 * Enables PCI bus-mastering for the device. This is the default
4308 * implementation. Architecture specific implementations can override
4309 * this if necessary.
4310 */
4311 void __weak pcibios_set_master(struct pci_dev *dev)
4312 {
4313 u8 lat;
4314
4315 /* The latency timer doesn't apply to PCIe (either Type 0 or Type 1) */
4316 if (pci_is_pcie(dev))
4317 return;
4318
4319 pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
4320 if (lat < 16)
4321 lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency;
4322 else if (lat > pcibios_max_latency)
4323 lat = pcibios_max_latency;
4324 else
4325 return;
4326
4327 pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
4328 }
4329
4330 /**
4331 * pci_set_master - enables bus-mastering for device dev
4332 * @dev: the PCI device to enable
4333 *
4334 * Enables bus-mastering on the device and calls pcibios_set_master()
4335 * to do the needed arch specific settings.
4336 */
4337 void pci_set_master(struct pci_dev *dev)
4338 {
4339 __pci_set_master(dev, true);
4340 pcibios_set_master(dev);
4341 }
4342 EXPORT_SYMBOL(pci_set_master);
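
/*
 * Example (illustrative only): the canonical probe-time sequence for a
 * DMA-capable device is to enable the device first, then enable bus
 * mastering:
 *
 *	err = pci_enable_device(pdev);
 *	if (err)
 *		return err;
 *	pci_set_master(pdev);
 */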
4343
4344 /**
4345 * pci_clear_master - disables bus-mastering for device dev
4346 * @dev: the PCI device to disable
4347 */
4348 void pci_clear_master(struct pci_dev *dev)
4349 {
4350 __pci_set_master(dev, false);
4351 }
4352 EXPORT_SYMBOL(pci_clear_master);
4353
4354 /**
4355 * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed
4356 * @dev: the PCI device for which MWI is to be enabled
4357 *
4358 * Helper function for pci_set_mwi.
4359 * Originally copied from drivers/net/acenic.c.
4360 * Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>.
4361 *
4362 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
4363 */
4364 int pci_set_cacheline_size(struct pci_dev *dev)
4365 {
4366 u8 cacheline_size;
4367
4368 if (!pci_cache_line_size)
4369 return -EINVAL;
4370
4371 /* Validate current setting: the PCI_CACHE_LINE_SIZE must be
4372 equal to or a multiple of the right value. */
4373 pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
4374 if (cacheline_size >= pci_cache_line_size &&
4375 (cacheline_size % pci_cache_line_size) == 0)
4376 return 0;
4377
4378 /* Write the correct value. */
4379 pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
4380 /* Read it back. */
4381 pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
4382 if (cacheline_size == pci_cache_line_size)
4383 return 0;
4384
4385 pci_dbg(dev, "cache line size of %d is not supported\n",
4386 pci_cache_line_size << 2);
4387
4388 return -EINVAL;
4389 }
4390 EXPORT_SYMBOL_GPL(pci_set_cacheline_size);
4391
4392 /**
4393 * pci_set_mwi - enables memory-write-invalidate PCI transaction
4394 * @dev: the PCI device for which MWI is enabled
4395 *
4396 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
4397 *
4398 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
4399 */
4400 int pci_set_mwi(struct pci_dev *dev)
4401 {
4402 #ifdef PCI_DISABLE_MWI
4403 return 0;
4404 #else
4405 int rc;
4406 u16 cmd;
4407
4408 rc = pci_set_cacheline_size(dev);
4409 if (rc)
4410 return rc;
4411
4412 pci_read_config_word(dev, PCI_COMMAND, &cmd);
4413 if (!(cmd & PCI_COMMAND_INVALIDATE)) {
4414 pci_dbg(dev, "enabling Mem-Wr-Inval\n");
4415 cmd |= PCI_COMMAND_INVALIDATE;
4416 pci_write_config_word(dev, PCI_COMMAND, cmd);
4417 }
4418 return 0;
4419 #endif
4420 }
4421 EXPORT_SYMBOL(pci_set_mwi);
4422
4423 /**
4424 * pci_try_set_mwi - enables memory-write-invalidate PCI transaction
4425 * @dev: the PCI device for which MWI is enabled
4426 *
4427 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
4428 * Callers are not required to check the return value.
4429 *
4430 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
4431 */
4432 int pci_try_set_mwi(struct pci_dev *dev)
4433 {
4434 #ifdef PCI_DISABLE_MWI
4435 return 0;
4436 #else
4437 return pci_set_mwi(dev);
4438 #endif
4439 }
4440 EXPORT_SYMBOL(pci_try_set_mwi);
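
/*
 * Example (illustrative only): since MWI is purely a performance hint,
 * drivers usually use the _try_ variant and ignore the result:
 *
 *	pci_try_set_mwi(pdev);
 */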
4441
4442 /**
4443 * pci_clear_mwi - disables Memory-Write-Invalidate for device dev
4444 * @dev: the PCI device to disable
4445 *
4446 * Disables PCI Memory-Write-Invalidate transaction on the device
4447 */
4448 void pci_clear_mwi(struct pci_dev *dev)
4449 {
4450 #ifndef PCI_DISABLE_MWI
4451 u16 cmd;
4452
4453 pci_read_config_word(dev, PCI_COMMAND, &cmd);
4454 if (cmd & PCI_COMMAND_INVALIDATE) {
4455 cmd &= ~PCI_COMMAND_INVALIDATE;
4456 pci_write_config_word(dev, PCI_COMMAND, cmd);
4457 }
4458 #endif
4459 }
4460 EXPORT_SYMBOL(pci_clear_mwi);
4461
4462 /**
4463 * pci_disable_parity - disable parity checking for device
4464 * @dev: the PCI device to operate on
4465 *
4466 * Disable parity checking for device @dev
4467 */
4468 void pci_disable_parity(struct pci_dev *dev)
4469 {
4470 u16 cmd;
4471
4472 pci_read_config_word(dev, PCI_COMMAND, &cmd);
4473 if (cmd & PCI_COMMAND_PARITY) {
4474 cmd &= ~PCI_COMMAND_PARITY;
4475 pci_write_config_word(dev, PCI_COMMAND, cmd);
4476 }
4477 }
4478
4479 /**
4480 * pci_intx - enables/disables PCI INTx for device dev
4481 * @pdev: the PCI device to operate on
4482 * @enable: boolean: whether to enable or disable PCI INTx
4483 *
4484 * Enables/disables PCI INTx for device @pdev
4485 *
4486 * NOTE:
4487 * This is a "hybrid" function: It's normally unmanaged, but becomes managed
4488 * when pcim_enable_device() has been called in advance. This hybrid feature is
4489 * DEPRECATED! If you want managed cleanup, use pcim_intx() instead.
4490 */
4491 void pci_intx(struct pci_dev *pdev, int enable)
4492 {
4493 u16 pci_command, new;
4494
4495 pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
4496
4497 if (enable)
4498 new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
4499 else
4500 new = pci_command | PCI_COMMAND_INTX_DISABLE;
4501
4502 if (new != pci_command) {
4503 /* Preserve the "hybrid" behavior for backwards compatibility */
4504 if (pci_is_managed(pdev)) {
4505 WARN_ON_ONCE(pcim_intx(pdev, enable) != 0);
4506 return;
4507 }
4508
4509 pci_write_config_word(pdev, PCI_COMMAND, new);
4510 }
4511 }
4512 EXPORT_SYMBOL_GPL(pci_intx);
4513
4514 /**
4515 * pci_wait_for_pending_transaction - wait for pending transaction
4516 * @dev: the PCI device to operate on
4517 *
4518 * Return 0 if a transaction is pending, 1 otherwise.
4519 */
4520 int pci_wait_for_pending_transaction(struct pci_dev *dev)
4521 {
4522 if (!pci_is_pcie(dev))
4523 return 1;
4524
4525 return pci_wait_for_pending(dev, pci_pcie_cap(dev) + PCI_EXP_DEVSTA,
4526 PCI_EXP_DEVSTA_TRPND);
4527 }
4528 EXPORT_SYMBOL(pci_wait_for_pending_transaction);
4529
4530 /**
4531 * pcie_flr - initiate a PCIe function level reset
4532 * @dev: device to reset
4533 *
4534 * Initiate a function level reset unconditionally on @dev without
4535 * checking any flags or DEVCAP.
4536 */
4537 int pcie_flr(struct pci_dev *dev)
4538 {
4539 if (!pci_wait_for_pending_transaction(dev))
4540 pci_err(dev, "timed out waiting for pending transaction; performing function level reset anyway\n");
4541
4542 pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
4543
4544 if (dev->imm_ready)
4545 return 0;
4546
4547 /*
4548 * Per PCIe r4.0, sec 6.6.2, a device must complete an FLR within
4549 * 100ms, but may silently discard requests while the FLR is in
4550 * progress. Wait 100ms before trying to access the device.
4551 */
4552 msleep(100);
4553
4554 return pci_dev_wait(dev, "FLR", PCIE_RESET_READY_POLL_MS);
4555 }
4556 EXPORT_SYMBOL_GPL(pcie_flr);
4557
4558 /**
4559 * pcie_reset_flr - initiate a PCIe function level reset
4560 * @dev: device to reset
4561 * @probe: if true, return 0 if device can be reset this way
4562 *
4563 * Initiate a function level reset on @dev.
4564 */
4565 int pcie_reset_flr(struct pci_dev *dev, bool probe)
4566 {
4567 if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET)
4568 return -ENOTTY;
4569
4570 if (!(dev->devcap & PCI_EXP_DEVCAP_FLR))
4571 return -ENOTTY;
4572
4573 if (probe)
4574 return 0;
4575
4576 return pcie_flr(dev);
4577 }
4578 EXPORT_SYMBOL_GPL(pcie_reset_flr);
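
/*
 * Example (illustrative only): callers can probe whether FLR is usable
 * before performing it:
 *
 *	if (pcie_reset_flr(dev, PCI_RESET_PROBE) == 0)
 *		err = pcie_reset_flr(dev, PCI_RESET_DO_RESET);
 */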
4579
4580 static int pci_af_flr(struct pci_dev *dev, bool probe)
4581 {
4582 int pos;
4583 u8 cap;
4584
4585 pos = pci_find_capability(dev, PCI_CAP_ID_AF);
4586 if (!pos)
4587 return -ENOTTY;
4588
4589 if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET)
4590 return -ENOTTY;
4591
4592 pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap);
4593 if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
4594 return -ENOTTY;
4595
4596 if (probe)
4597 return 0;
4598
4599 /*
4600 * Wait for Transaction Pending bit to clear. A word-aligned test
4601 * is used, so we use the control offset rather than status and shift
4602 * the test bit to match.
4603 */
4604 if (!pci_wait_for_pending(dev, pos + PCI_AF_CTRL,
4605 PCI_AF_STATUS_TP << 8))
4606 pci_err(dev, "timed out waiting for pending transaction; performing AF function level reset anyway\n");
4607
4608 pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
4609
4610 if (dev->imm_ready)
4611 return 0;
4612
4613 /*
4614 * Per Advanced Capabilities for Conventional PCI ECN, 13 April 2006,
4615 * updated 27 July 2006; a device must complete an FLR within
4616 * 100ms, but may silently discard requests while the FLR is in
4617 * progress. Wait 100ms before trying to access the device.
4618 */
4619 msleep(100);
4620
4621 return pci_dev_wait(dev, "AF_FLR", PCIE_RESET_READY_POLL_MS);
4622 }
4623
4624 /**
4625 * pci_pm_reset - Put device into PCI_D3 and back into PCI_D0.
4626 * @dev: Device to reset.
4627 * @probe: if true, return 0 if the device can be reset this way.
4628 *
4629 * If @dev supports native PCI PM and its PCI_PM_CTRL_NO_SOFT_RESET flag is
4630 * unset, it will be reinitialized internally when going from PCI_D3hot to
4631 * PCI_D0. If that's the case and the device is not in a low-power state
4632 * already, force it into PCI_D3hot and back to PCI_D0, causing it to be reset.
4633 *
4634 * NOTE: This causes the caller to sleep for twice the device power transition
4635 * cooldown period, which for the D0->D3hot and D3hot->D0 transitions is 10 ms
4636 * by default (i.e. unless the @dev's d3hot_delay field has a different value).
4637 * Moreover, only devices in D0 can be reset by this function.
4638 */
4639 static int pci_pm_reset(struct pci_dev *dev, bool probe)
4640 {
4641 u16 csr;
4642
4643 if (!dev->pm_cap || dev->dev_flags & PCI_DEV_FLAGS_NO_PM_RESET)
4644 return -ENOTTY;
4645
4646 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr);
4647 if (csr & PCI_PM_CTRL_NO_SOFT_RESET)
4648 return -ENOTTY;
4649
4650 if (probe)
4651 return 0;
4652
4653 if (dev->current_state != PCI_D0)
4654 return -EINVAL;
4655
4656 csr &= ~PCI_PM_CTRL_STATE_MASK;
4657 csr |= PCI_D3hot;
4658 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
4659 pci_dev_d3_sleep(dev);
4660
4661 csr &= ~PCI_PM_CTRL_STATE_MASK;
4662 csr |= PCI_D0;
4663 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
4664 pci_dev_d3_sleep(dev);
4665
4666 return pci_dev_wait(dev, "PM D3hot->D0", PCIE_RESET_READY_POLL_MS);
4667 }
4668
4669 /**
4670 * pcie_wait_for_link_status - Wait for link status change
4671 * @pdev: Device whose link to wait for.
4672 * @use_lt: Use the LT bit if TRUE, or the DLLLA bit if FALSE.
4673 * @active: Waiting for active or inactive?
4674 *
4675 * Return 0 if successful, or -ETIMEDOUT if status has not changed within
4676 * PCIE_LINK_RETRAIN_TIMEOUT_MS milliseconds.
4677 */
4678 static int pcie_wait_for_link_status(struct pci_dev *pdev,
4679 bool use_lt, bool active)
4680 {
4681 u16 lnksta_mask, lnksta_match;
4682 unsigned long end_jiffies;
4683 u16 lnksta;
4684
4685 lnksta_mask = use_lt ? PCI_EXP_LNKSTA_LT : PCI_EXP_LNKSTA_DLLLA;
4686 lnksta_match = active ? lnksta_mask : 0;
4687
4688 end_jiffies = jiffies + msecs_to_jiffies(PCIE_LINK_RETRAIN_TIMEOUT_MS);
4689 do {
4690 pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnksta);
4691 if ((lnksta & lnksta_mask) == lnksta_match)
4692 return 0;
4693 msleep(1);
4694 } while (time_before(jiffies, end_jiffies));
4695
4696 return -ETIMEDOUT;
4697 }
4698
4699 /**
4700 * pcie_retrain_link - Request a link retrain and wait for it to complete
4701 * @pdev: Device whose link to retrain.
4702 * @use_lt: Use the LT bit if TRUE, or the DLLLA bit if FALSE, for status.
4703 *
4704 * Retrain completion status is retrieved from the Link Status Register
4705 * according to @use_lt. It is not verified whether the use of the DLLLA
4706 * bit is valid.
4707 *
4708 * Return 0 if successful, or -ETIMEDOUT if training has not completed
4709 * within PCIE_LINK_RETRAIN_TIMEOUT_MS milliseconds.
4710 */
4711 int pcie_retrain_link(struct pci_dev *pdev, bool use_lt)
4712 {
4713 int rc;
4714
4715 /*
4716 * Ensure the updated LNKCTL parameters are used during link
4717 * training by checking that there is no ongoing link training that
4718 * may have started before link parameters were changed, so as to
4719 * avoid LTSSM race as recommended in Implementation Note at the end
4720 * of PCIe r6.1 sec 7.5.3.7.
4721 */
4722 rc = pcie_wait_for_link_status(pdev, true, false);
4723 if (rc)
4724 return rc;
4725
4726 pcie_capability_set_word(pdev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_RL);
4727 if (pdev->clear_retrain_link) {
4728 /*
4729 * Due to an erratum in some devices the Retrain Link bit
4730 * needs to be cleared again manually to allow the link
4731 * training to succeed.
4732 */
4733 pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_RL);
4734 }
4735
4736 rc = pcie_wait_for_link_status(pdev, use_lt, !use_lt);
4737
4738 /*
4739 * Clear LBMS after a manual retrain so that the bit can be used
4740 * to track link speed or width changes made by hardware itself
4741 * in attempt to correct unreliable link operation.
4742 */
4743 pcie_capability_write_word(pdev, PCI_EXP_LNKSTA, PCI_EXP_LNKSTA_LBMS);
4744 return rc;
4745 }
4746
4747 /**
4748 * pcie_wait_for_link_delay - Wait until link is active or inactive
4749 * @pdev: Bridge device
4750 * @active: waiting for active or inactive?
4751 * @delay: Delay to wait after link has become active (in ms)
4752 *
4753 * Use this to wait till link becomes active or inactive.
4754 */
4755 static bool pcie_wait_for_link_delay(struct pci_dev *pdev, bool active,
4756 int delay)
4757 {
4758 int rc;
4759
4760 /*
4761 * Some controllers might not implement link active reporting. In this
4762 * case, we wait for 1000 ms + any delay requested by the caller.
4763 */
4764 if (!pdev->link_active_reporting) {
4765 msleep(PCIE_LINK_RETRAIN_TIMEOUT_MS + delay);
4766 return true;
4767 }
4768
4769 /*
4770 * Per PCIe r4.0 sec 6.6.1, a component must enter LTSSM Detect within 20ms,
4771 * after which we should expect the link to become active if the reset was
4772 * successful. If so, software must wait a minimum of 100ms before sending
4773 * configuration requests to devices downstream of this port.
4774 *
4775 * If the link fails to activate, either the device was physically
4776 * removed or the link is permanently failed.
4777 */
4778 if (active)
4779 msleep(20);
4780 rc = pcie_wait_for_link_status(pdev, false, active);
4781 if (active) {
4782 if (rc)
4783 rc = pcie_failed_link_retrain(pdev);
4784 if (rc)
4785 return false;
4786
4787 msleep(delay);
4788 return true;
4789 }
4790
4791 if (rc)
4792 return false;
4793
4794 return true;
4795 }
4796
4797 /**
4798 * pcie_wait_for_link - Wait until link is active or inactive
4799 * @pdev: Bridge device
4800 * @active: waiting for active or inactive?
4801 *
4802 * Use this to wait till link becomes active or inactive.
4803 */
4804 bool pcie_wait_for_link(struct pci_dev *pdev, bool active)
4805 {
4806 return pcie_wait_for_link_delay(pdev, active, 100);
4807 }
4808
4809 /*
4810 * Find maximum D3cold delay required by all the devices on the bus. The
4811 * spec says 100 ms, but firmware can lower it and we allow drivers to
4812 * increase it as well.
4813 *
4814 * Called with @pci_bus_sem locked for reading.
4815 */
4816 static int pci_bus_max_d3cold_delay(const struct pci_bus *bus)
4817 {
4818 const struct pci_dev *pdev;
4819 int min_delay = 100;
4820 int max_delay = 0;
4821
4822 list_for_each_entry(pdev, &bus->devices, bus_list) {
4823 if (pdev->d3cold_delay < min_delay)
4824 min_delay = pdev->d3cold_delay;
4825 if (pdev->d3cold_delay > max_delay)
4826 max_delay = pdev->d3cold_delay;
4827 }
4828
4829 return max(min_delay, max_delay);
4830 }
4831
4832 /**
4833 * pci_bridge_wait_for_secondary_bus - Wait for secondary bus to be accessible
4834 * @dev: PCI bridge
4835 * @reset_type: reset type in human-readable form
4836 *
4837 * Handle necessary delays before access to the devices on the secondary
4838 * side of the bridge are permitted after D3cold to D0 transition
4839 * or Conventional Reset.
4840 *
4841 * For PCIe this means the delays in PCIe 5.0 section 6.6.1. For
4842 * conventional PCI it means Tpvrh + Trhfa specified in PCI 3.0 section
4843 * 4.3.2.
4844 *
4845 * Return 0 on success or -ENOTTY if the first device on the secondary bus
4846 * failed to become accessible.
4847 */
4848 int pci_bridge_wait_for_secondary_bus(struct pci_dev *dev, char *reset_type)
4849 {
4850 struct pci_dev *child __free(pci_dev_put) = NULL;
4851 int delay;
4852
4853 if (pci_dev_is_disconnected(dev))
4854 return 0;
4855
4856 if (!pci_is_bridge(dev))
4857 return 0;
4858
4859 down_read(&pci_bus_sem);
4860
4861 /*
4862 * We only deal with devices that are present currently on the bus.
4863 * For any hot-added devices the access delay is handled in pciehp
4864 * board_added(). In case of ACPI hotplug the firmware is expected
4865 * to configure the devices before OS is notified.
4866 */
4867 if (!dev->subordinate || list_empty(&dev->subordinate->devices)) {
4868 up_read(&pci_bus_sem);
4869 return 0;
4870 }
4871
4872 /* Take d3cold_delay requirements into account */
4873 delay = pci_bus_max_d3cold_delay(dev->subordinate);
4874 if (!delay) {
4875 up_read(&pci_bus_sem);
4876 return 0;
4877 }
4878
4879 child = pci_dev_get(list_first_entry(&dev->subordinate->devices,
4880 struct pci_dev, bus_list));
4881 up_read(&pci_bus_sem);
4882
4883 /*
4884 * For conventional PCI and PCI-X we need to wait Tpvrh + Trhfa before
4885 * accessing the device after reset (that is 1000 ms + 100 ms).
4886 */
4887 if (!pci_is_pcie(dev)) {
4888 pci_dbg(dev, "waiting %d ms for secondary bus\n", 1000 + delay);
4889 msleep(1000 + delay);
4890 return 0;
4891 }
4892
4893 /*
4894 * PCIe downstream and root ports that do not support speeds
4895 * greater than 5 GT/s need to wait a minimum of 100 ms. For higher
4896 * speeds (gen3 and higher) we need to wait first for the data link
4897 * layer to become active.
4898 *
4899 * However, 100 ms is the minimum and the PCIe spec says the
4900 * software must allow at least 1s before it can determine that the
4901 * device that did not respond is a broken device. Also, a device can
4902 * take longer than that to respond if it indicates so through Request
4903 * Retry Status completions.
4904 *
4905 * Therefore we wait for 100 ms and check for the device presence
4906 * until the timeout expires.
4907 */
4908 if (!pcie_downstream_port(dev))
4909 return 0;
4910
4911 if (pcie_get_speed_cap(dev) <= PCIE_SPEED_5_0GT) {
4912 u16 status;
4913
4914 pci_dbg(dev, "waiting %d ms for downstream link\n", delay);
4915 msleep(delay);
4916
4917 if (!pci_dev_wait(child, reset_type, PCI_RESET_WAIT - delay))
4918 return 0;
4919
4920 /*
4921 * If the port supports active link reporting we now check
4922 * whether the link is active and if not bail out early with
4923 * the assumption that the device is not present anymore.
4924 */
4925 if (!dev->link_active_reporting)
4926 return -ENOTTY;
4927
4928 pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &status);
4929 if (!(status & PCI_EXP_LNKSTA_DLLLA))
4930 return -ENOTTY;
4931
4932 return pci_dev_wait(child, reset_type,
4933 PCIE_RESET_READY_POLL_MS - PCI_RESET_WAIT);
4934 }
4935
4936 pci_dbg(dev, "waiting %d ms for downstream link, after activation\n",
4937 delay);
4938 if (!pcie_wait_for_link_delay(dev, true, delay)) {
4939 /* Did not train, no need to wait any further */
4940 pci_info(dev, "Data Link Layer Link Active not set in 1000 msec\n");
4941 return -ENOTTY;
4942 }
4943
4944 return pci_dev_wait(child, reset_type,
4945 PCIE_RESET_READY_POLL_MS - delay);
4946 }
4947
4948 void pci_reset_secondary_bus(struct pci_dev *dev)
4949 {
4950 u16 ctrl;
4951
4952 pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &ctrl);
4953 ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
4954 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
4955
4956 /*
4957 * PCI spec v3.0 7.6.4.2 requires minimum Trst of 1ms. Double
4958 * this to 2ms to ensure that we meet the minimum requirement.
4959 */
4960 msleep(2);
4961
4962 ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
4963 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
4964 }
4965
4966 void __weak pcibios_reset_secondary_bus(struct pci_dev *dev)
4967 {
4968 pci_reset_secondary_bus(dev);
4969 }
4970
4971 /**
4972 * pci_bridge_secondary_bus_reset - Reset the secondary bus on a PCI bridge.
4973 * @dev: Bridge device
4974 *
4975 * Use the bridge control register to assert reset on the secondary bus.
4976 * Devices on the secondary bus are left in power-on state.
4977 */
4978 int pci_bridge_secondary_bus_reset(struct pci_dev *dev)
4979 {
4980 if (!dev->block_cfg_access)
4981 pci_warn_once(dev, "unlocked secondary bus reset via: %pS\n",
4982 __builtin_return_address(0));
4983 pcibios_reset_secondary_bus(dev);
4984
4985 return pci_bridge_wait_for_secondary_bus(dev, "bus reset");
4986 }
4987 EXPORT_SYMBOL_GPL(pci_bridge_secondary_bus_reset);
4988
4989 static int pci_parent_bus_reset(struct pci_dev *dev, bool probe)
4990 {
4991 struct pci_dev *pdev;
4992
4993 if (pci_is_root_bus(dev->bus) || dev->subordinate ||
4994 !dev->bus->self || dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
4995 return -ENOTTY;
4996
4997 list_for_each_entry(pdev, &dev->bus->devices, bus_list)
4998 if (pdev != dev)
4999 return -ENOTTY;
5000
5001 if (probe)
5002 return 0;
5003
5004 return pci_bridge_secondary_bus_reset(dev->bus->self);
5005 }
5006
5007 static int pci_reset_hotplug_slot(struct hotplug_slot *hotplug, bool probe)
5008 {
5009 int rc = -ENOTTY;
5010
5011 if (!hotplug || !try_module_get(hotplug->owner))
5012 return rc;
5013
5014 if (hotplug->ops->reset_slot)
5015 rc = hotplug->ops->reset_slot(hotplug, probe);
5016
5017 module_put(hotplug->owner);
5018
5019 return rc;
5020 }
5021
5022 static int pci_dev_reset_slot_function(struct pci_dev *dev, bool probe)
5023 {
5024 if (dev->multifunction || dev->subordinate || !dev->slot ||
5025 dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
5026 return -ENOTTY;
5027
5028 return pci_reset_hotplug_slot(dev->slot->hotplug, probe);
5029 }
5030
5031 static u16 cxl_port_dvsec(struct pci_dev *dev)
5032 {
5033 return pci_find_dvsec_capability(dev, PCI_VENDOR_ID_CXL,
5034 PCI_DVSEC_CXL_PORT);
5035 }
5036
5037 static bool cxl_sbr_masked(struct pci_dev *dev)
5038 {
5039 u16 dvsec, reg;
5040 int rc;
5041
5042 dvsec = cxl_port_dvsec(dev);
5043 if (!dvsec)
5044 return false;
5045
5046 rc = pci_read_config_word(dev, dvsec + PCI_DVSEC_CXL_PORT_CTL, &reg);
5047 if (rc || PCI_POSSIBLE_ERROR(reg))
5048 return false;
5049
5050 /*
5051 * Per CXL spec r3.1, sec 8.1.5.2, when "Unmask SBR" is 0, the SBR
5052 * bit in Bridge Control has no effect. When 1, the Port generates
5053 * hot reset when the SBR bit is set to 1.
5054 */
5055 if (reg & PCI_DVSEC_CXL_PORT_CTL_UNMASK_SBR)
5056 return false;
5057
5058 return true;
5059 }
5060
5061 static int pci_reset_bus_function(struct pci_dev *dev, bool probe)
5062 {
5063 struct pci_dev *bridge = pci_upstream_bridge(dev);
5064 int rc;
5065
5066 /*
5067 * If "dev" is below a CXL port that has SBR control masked, SBR
5068 * won't do anything, so return error.
5069 */
5070 if (bridge && cxl_sbr_masked(bridge)) {
5071 if (probe)
5072 return 0;
5073
5074 return -ENOTTY;
5075 }
5076
5077 rc = pci_dev_reset_slot_function(dev, probe);
5078 if (rc != -ENOTTY)
5079 return rc;
5080 return pci_parent_bus_reset(dev, probe);
5081 }
5082
5083 static int cxl_reset_bus_function(struct pci_dev *dev, bool probe)
5084 {
5085 struct pci_dev *bridge;
5086 u16 dvsec, reg, val;
5087 int rc;
5088
5089 bridge = pci_upstream_bridge(dev);
5090 if (!bridge)
5091 return -ENOTTY;
5092
5093 dvsec = cxl_port_dvsec(bridge);
5094 if (!dvsec)
5095 return -ENOTTY;
5096
5097 if (probe)
5098 return 0;
5099
5100 rc = pci_read_config_word(bridge, dvsec + PCI_DVSEC_CXL_PORT_CTL, &reg);
5101 if (rc)
5102 return -ENOTTY;
5103
5104 if (reg & PCI_DVSEC_CXL_PORT_CTL_UNMASK_SBR) {
5105 val = reg;
5106 } else {
5107 val = reg | PCI_DVSEC_CXL_PORT_CTL_UNMASK_SBR;
5108 pci_write_config_word(bridge, dvsec + PCI_DVSEC_CXL_PORT_CTL,
5109 val);
5110 }
5111
5112 rc = pci_reset_bus_function(dev, probe);
5113
5114 if (reg != val)
5115 pci_write_config_word(bridge, dvsec + PCI_DVSEC_CXL_PORT_CTL,
5116 reg);
5117
5118 return rc;
5119 }
5120
5121 void pci_dev_lock(struct pci_dev *dev)
5122 {
5123 /* block PM suspend, driver probe, etc. */
5124 device_lock(&dev->dev);
5125 pci_cfg_access_lock(dev);
5126 }
5127 EXPORT_SYMBOL_GPL(pci_dev_lock);
5128
5129 /* Return 1 on successful lock, 0 on contention */
5130 int pci_dev_trylock(struct pci_dev *dev)
5131 {
5132 if (device_trylock(&dev->dev)) {
5133 if (pci_cfg_access_trylock(dev))
5134 return 1;
5135 device_unlock(&dev->dev);
5136 }
5137
5138 return 0;
5139 }
5140 EXPORT_SYMBOL_GPL(pci_dev_trylock);
5141
5142 void pci_dev_unlock(struct pci_dev *dev)
5143 {
5144 pci_cfg_access_unlock(dev);
5145 device_unlock(&dev->dev);
5146 }
5147 EXPORT_SYMBOL_GPL(pci_dev_unlock);
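
/*
 * Example (illustrative only): reset paths bracket state-changing
 * operations with the device lock so they cannot race with driver bind
 * or unbind:
 *
 *	pci_dev_lock(dev);
 *	err = __pci_reset_function_locked(dev);
 *	pci_dev_unlock(dev);
 */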
5148
5149 static void pci_dev_save_and_disable(struct pci_dev *dev)
5150 {
5151 const struct pci_error_handlers *err_handler =
5152 dev->driver ? dev->driver->err_handler : NULL;
5153
5154 /*
5155 * dev->driver->err_handler->reset_prepare() is protected against
5156 * races with ->remove() by the device lock, which must be held by
5157 * the caller.
5158 */
5159 if (err_handler && err_handler->reset_prepare)
5160 err_handler->reset_prepare(dev);
5161
5162 /*
5163 * Wake-up device prior to save. PM registers default to D0 after
5164 * reset and a simple register restore doesn't reliably return
5165 * to a non-D0 state anyway.
5166 */
5167 pci_set_power_state(dev, PCI_D0);
5168
5169 pci_save_state(dev);
5170 /*
5171 * Disable the device by clearing the Command register, except for
5172 * INTx-disable which is set. This not only disables MMIO and I/O port
5173 * BARs, but also prevents the device from being Bus Master, preventing
5174 * DMA from the device including MSI/MSI-X interrupts. For PCI 2.3
5175 * compliant devices, INTx-disable prevents legacy interrupts.
5176 */
5177 pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
5178 }
5179
5180 static void pci_dev_restore(struct pci_dev *dev)
5181 {
5182 const struct pci_error_handlers *err_handler =
5183 dev->driver ? dev->driver->err_handler : NULL;
5184
5185 pci_restore_state(dev);
5186
5187 /*
5188 * dev->driver->err_handler->reset_done() is protected against
5189 * races with ->remove() by the device lock, which must be held by
5190 * the caller.
5191 */
5192 if (err_handler && err_handler->reset_done)
5193 err_handler->reset_done(dev);
5194 }
5195
5196 /* dev->reset_methods[] is a 0-terminated list of indices into this array */
5197 static const struct pci_reset_fn_method pci_reset_fn_methods[] = {
5198 { },
5199 { pci_dev_specific_reset, .name = "device_specific" },
5200 { pci_dev_acpi_reset, .name = "acpi" },
5201 { pcie_reset_flr, .name = "flr" },
5202 { pci_af_flr, .name = "af_flr" },
5203 { pci_pm_reset, .name = "pm" },
5204 { pci_reset_bus_function, .name = "bus" },
5205 { cxl_reset_bus_function, .name = "cxl_bus" },
5206 };
5207
5208 static ssize_t reset_method_show(struct device *dev,
5209 struct device_attribute *attr, char *buf)
5210 {
5211 struct pci_dev *pdev = to_pci_dev(dev);
5212 ssize_t len = 0;
5213 int i, m;
5214
5215 for (i = 0; i < PCI_NUM_RESET_METHODS; i++) {
5216 m = pdev->reset_methods[i];
5217 if (!m)
5218 break;
5219
5220 len += sysfs_emit_at(buf, len, "%s%s", len ? " " : "",
5221 pci_reset_fn_methods[m].name);
5222 }
5223
5224 if (len)
5225 len += sysfs_emit_at(buf, len, "\n");
5226
5227 return len;
5228 }
5229
5230 static int reset_method_lookup(const char *name)
5231 {
5232 int m;
5233
5234 for (m = 1; m < PCI_NUM_RESET_METHODS; m++) {
5235 if (sysfs_streq(name, pci_reset_fn_methods[m].name))
5236 return m;
5237 }
5238
5239 return 0; /* not found */
5240 }
5241
5242 static ssize_t reset_method_store(struct device *dev,
5243 struct device_attribute *attr,
5244 const char *buf, size_t count)
5245 {
5246 struct pci_dev *pdev = to_pci_dev(dev);
5247 char *options, *name;
5248 int m, n;
5249 u8 reset_methods[PCI_NUM_RESET_METHODS] = { 0 };
5250
5251 if (sysfs_streq(buf, "")) {
5252 pdev->reset_methods[0] = 0;
5253 pci_warn(pdev, "All device reset methods disabled by user");
5254 return count;
5255 }
5256
5257 if (sysfs_streq(buf, "default")) {
5258 pci_init_reset_methods(pdev);
5259 return count;
5260 }
5261
5262 options = kstrndup(buf, count, GFP_KERNEL);
5263 if (!options)
5264 return -ENOMEM;
5265
5266 n = 0;
5267 while ((name = strsep(&options, " ")) != NULL) {
5268 if (sysfs_streq(name, ""))
5269 continue;
5270
5271 name = strim(name);
5272
5273 m = reset_method_lookup(name);
5274 if (!m) {
5275 pci_err(pdev, "Invalid reset method '%s'", name);
5276 goto error;
5277 }
5278
5279 if (pci_reset_fn_methods[m].reset_fn(pdev, PCI_RESET_PROBE)) {
5280 pci_err(pdev, "Unsupported reset method '%s'", name);
5281 goto error;
5282 }
5283
5284 if (n == PCI_NUM_RESET_METHODS - 1) {
5285 pci_err(pdev, "Too many reset methods\n");
5286 goto error;
5287 }
5288
5289 reset_methods[n++] = m;
5290 }
5291
5292 reset_methods[n] = 0;
5293
5294 /* Warn if dev-specific supported but not highest priority */
5295 if (pci_reset_fn_methods[1].reset_fn(pdev, PCI_RESET_PROBE) == 0 &&
5296 reset_methods[0] != 1)
5297 pci_warn(pdev, "Device-specific reset disabled/de-prioritized by user");
5298 memcpy(pdev->reset_methods, reset_methods, sizeof(pdev->reset_methods));
5299 kfree(options);
5300 return count;
5301
5302 error:
5303 /* Leave previous methods unchanged */
5304 kfree(options);
5305 return -EINVAL;
5306 }
5307 static DEVICE_ATTR_RW(reset_method);
5308
5309 static struct attribute *pci_dev_reset_method_attrs[] = {
5310 &dev_attr_reset_method.attr,
5311 NULL,
5312 };
5313
5314 static umode_t pci_dev_reset_method_attr_is_visible(struct kobject *kobj,
5315 struct attribute *a, int n)
5316 {
5317 struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
5318
5319 if (!pci_reset_supported(pdev))
5320 return 0;
5321
5322 return a->mode;
5323 }
5324
5325 const struct attribute_group pci_dev_reset_method_attr_group = {
5326 .attrs = pci_dev_reset_method_attrs,
5327 .is_visible = pci_dev_reset_method_attr_is_visible,
5328 };
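/*
 * Example (illustrative; the sysfs path is hypothetical): the attribute
 * group above backs the per-device "reset_method" file, so on a device
 * supporting several methods one can do:
 *
 *   # cat /sys/bus/pci/devices/0000:01:00.0/reset_method
 *   flr bus
 *   # echo "bus flr" > /sys/bus/pci/devices/0000:01:00.0/reset_method
 *   # echo default > /sys/bus/pci/devices/0000:01:00.0/reset_method
 *
 * Writing an empty string disables all reset methods for the device.
 */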
5329
5330 /**
5331 * __pci_reset_function_locked - reset a PCI device function while holding
5332 * the @dev mutex lock.
5333 * @dev: PCI device to reset
5334 *
5335 * Some devices allow an individual function to be reset without affecting
5336 * other functions in the same device. The PCI device must be responsive
5337 * to PCI config space in order to use this function.
5338 *
5339 * The device function is presumed to be unused and the caller is holding
5340 * the device mutex lock when this function is called.
5341 *
5342 * Resetting the device will make the contents of PCI configuration space
5343 * random, so any caller of this must be prepared to reinitialise the
5344 * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
5345 * etc.
5346 *
5347 * Returns 0 if the device function was successfully reset or negative if the
5348 * device doesn't support resetting a single function.
5349 */
5350 int __pci_reset_function_locked(struct pci_dev *dev)
5351 {
5352 int i, m, rc;
5353
5354 might_sleep();
5355
5356 /*
5357 * A reset method returns -ENOTTY if it doesn't support this device and
5358 * we should try the next method.
5359 *
5360 * If it returns 0 (success), we're finished. If it returns any other
5361 * error, we're also finished: this indicates that further reset
5362 * mechanisms might be broken on the device.
5363 */
5364 for (i = 0; i < PCI_NUM_RESET_METHODS; i++) {
5365 m = dev->reset_methods[i];
5366 if (!m)
5367 return -ENOTTY;
5368
5369 rc = pci_reset_fn_methods[m].reset_fn(dev, PCI_RESET_DO_RESET);
5370 if (!rc)
5371 return 0;
5372 if (rc != -ENOTTY)
5373 return rc;
5374 }
5375
5376 return -ENOTTY;
5377 }
5378 EXPORT_SYMBOL_GPL(__pci_reset_function_locked);
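/*
 * A minimal usage sketch (not part of this file): a caller that wants the
 * raw reset without the save/restore performed by pci_reset_function()
 * must hold the device lock itself, e.g.:
 *
 *	pci_dev_lock(dev);
 *	rc = __pci_reset_function_locked(dev);
 *	pci_dev_unlock(dev);
 *
 * and afterwards owns the complete reinitialization of the device.
 */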
5379
5380 /**
5381 * pci_init_reset_methods - check whether device can be safely reset
5382 * and store supported reset mechanisms.
5383 * @dev: PCI device to check for reset mechanisms
5384 *
5385 * Some devices allow an individual function to be reset without affecting
5386 * other functions in the same device. The PCI device must be in D0-D3hot
5387 * state.
5388 *
5389 * Stores the reset mechanisms supported by the device in the reset_methods
5390 * byte array, which is a member of struct pci_dev.
5391 */
5392 void pci_init_reset_methods(struct pci_dev *dev)
5393 {
5394 int m, i, rc;
5395
5396 BUILD_BUG_ON(ARRAY_SIZE(pci_reset_fn_methods) != PCI_NUM_RESET_METHODS);
5397
5398 might_sleep();
5399
5400 i = 0;
5401 for (m = 1; m < PCI_NUM_RESET_METHODS; m++) {
5402 rc = pci_reset_fn_methods[m].reset_fn(dev, PCI_RESET_PROBE);
5403 if (!rc)
5404 dev->reset_methods[i++] = m;
5405 else if (rc != -ENOTTY)
5406 break;
5407 }
5408
5409 dev->reset_methods[i] = 0;
5410 }
5411
5412 /**
5413 * pci_reset_function - quiesce and reset a PCI device function
5414 * @dev: PCI device to reset
5415 *
5416 * Some devices allow an individual function to be reset without affecting
5417 * other functions in the same device. The PCI device must be responsive
5418 * to PCI config space in order to use this function.
5419 *
5420 * This function does not just reset the PCI portion of a device, but
5421 * clears all the state associated with the device. This function differs
5422 * from __pci_reset_function_locked() in that it saves and restores device state
5423 * over the reset and takes the PCI device lock and, if one exists, the upstream bridge lock.
5424 *
5425 * Returns 0 if the device function was successfully reset or negative if the
5426 * device doesn't support resetting a single function.
5427 */
5428 int pci_reset_function(struct pci_dev *dev)
5429 {
5430 struct pci_dev *bridge;
5431 int rc;
5432
5433 if (!pci_reset_supported(dev))
5434 return -ENOTTY;
5435
5436 /*
5437 * If there's no upstream bridge, no locking is needed since there is
5438 * no upstream bridge configuration to hold consistent.
5439 */
5440 bridge = pci_upstream_bridge(dev);
5441 if (bridge)
5442 pci_dev_lock(bridge);
5443
5444 pci_dev_lock(dev);
5445 pci_dev_save_and_disable(dev);
5446
5447 rc = __pci_reset_function_locked(dev);
5448
5449 pci_dev_restore(dev);
5450 pci_dev_unlock(dev);
5451
5452 if (bridge)
5453 pci_dev_unlock(bridge);
5454
5455 return rc;
5456 }
5457 EXPORT_SYMBOL_GPL(pci_reset_function);
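/*
 * Illustrative sketch (hypothetical driver code, not part of this file):
 * a driver would typically call pci_reset_function() from a recovery path
 * and then reprogram whatever device state the restore cannot recreate:
 */
#if 0
static int mydrv_recover(struct pci_dev *pdev)	/* hypothetical */
{
	int rc;

	rc = pci_reset_function(pdev);	/* save, reset, restore */
	if (rc)
		return rc;

	return mydrv_hw_init(pdev);	/* hypothetical reinit helper */
}
#endif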
5458
5459 /**
5460 * pci_reset_function_locked - quiesce and reset a PCI device function
5461 * @dev: PCI device to reset
5462 *
5463 * Some devices allow an individual function to be reset without affecting
5464 * other functions in the same device. The PCI device must be responsive
5465 * to PCI config space in order to use this function.
5466 *
5467 * This function does not just reset the PCI portion of a device, but
5468 * clears all the state associated with the device. This function differs
5469 * from __pci_reset_function_locked() in that it saves and restores device state
5470 * over the reset. It also differs from pci_reset_function() in that it
5471 * requires the PCI device lock to be held.
5472 *
5473 * Returns 0 if the device function was successfully reset or negative if the
5474 * device doesn't support resetting a single function.
5475 */
5476 int pci_reset_function_locked(struct pci_dev *dev)
5477 {
5478 int rc;
5479
5480 if (!pci_reset_supported(dev))
5481 return -ENOTTY;
5482
5483 pci_dev_save_and_disable(dev);
5484
5485 rc = __pci_reset_function_locked(dev);
5486
5487 pci_dev_restore(dev);
5488
5489 return rc;
5490 }
5491 EXPORT_SYMBOL_GPL(pci_reset_function_locked);
5492
5493 /**
5494 * pci_try_reset_function - quiesce and reset a PCI device function
5495 * @dev: PCI device to reset
5496 *
5497 * Same as above, except return -EAGAIN if unable to lock device.
5498 */
5499 int pci_try_reset_function(struct pci_dev *dev)
5500 {
5501 int rc;
5502
5503 if (!pci_reset_supported(dev))
5504 return -ENOTTY;
5505
5506 if (!pci_dev_trylock(dev))
5507 return -EAGAIN;
5508
5509 pci_dev_save_and_disable(dev);
5510 rc = __pci_reset_function_locked(dev);
5511 pci_dev_restore(dev);
5512 pci_dev_unlock(dev);
5513
5514 return rc;
5515 }
5516 EXPORT_SYMBOL_GPL(pci_try_reset_function);
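/*
 * Usage sketch (hypothetical caller): unlike pci_reset_function(), this
 * never blocks on the device lock, so the caller must handle contention
 * itself, e.g.:
 *
 *	rc = pci_try_reset_function(pdev);
 *	if (rc == -EAGAIN)
 *		return rc;	(defer: somebody else holds the device lock)
 */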
5517
5518 /* Do any devices on or below this bus prevent a bus reset? */
5519 static bool pci_bus_resettable(struct pci_bus *bus)
5520 {
5521 struct pci_dev *dev;
5522
5524 if (bus->self && (bus->self->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET))
5525 return false;
5526
5527 list_for_each_entry(dev, &bus->devices, bus_list) {
5528 if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
5529 (dev->subordinate && !pci_bus_resettable(dev->subordinate)))
5530 return false;
5531 }
5532
5533 return true;
5534 }
5535
5536 /* Lock devices from the top of the tree down */
5537 static void pci_bus_lock(struct pci_bus *bus)
5538 {
5539 struct pci_dev *dev;
5540
5541 pci_dev_lock(bus->self);
5542 list_for_each_entry(dev, &bus->devices, bus_list) {
5543 if (dev->subordinate)
5544 pci_bus_lock(dev->subordinate);
5545 else
5546 pci_dev_lock(dev);
5547 }
5548 }
5549
5550 /* Unlock devices from the bottom of the tree up */
5551 static void pci_bus_unlock(struct pci_bus *bus)
5552 {
5553 struct pci_dev *dev;
5554
5555 list_for_each_entry(dev, &bus->devices, bus_list) {
5556 if (dev->subordinate)
5557 pci_bus_unlock(dev->subordinate);
5558 else
5559 pci_dev_unlock(dev);
5560 }
5561 pci_dev_unlock(bus->self);
5562 }
5563
5564 /* Return 1 on successful lock, 0 on contention */
5565 static int pci_bus_trylock(struct pci_bus *bus)
5566 {
5567 struct pci_dev *dev;
5568
5569 if (!pci_dev_trylock(bus->self))
5570 return 0;
5571
5572 list_for_each_entry(dev, &bus->devices, bus_list) {
5573 if (dev->subordinate) {
5574 if (!pci_bus_trylock(dev->subordinate))
5575 goto unlock;
5576 } else if (!pci_dev_trylock(dev))
5577 goto unlock;
5578 }
5579 return 1;
5580
5581 unlock:
5582 list_for_each_entry_continue_reverse(dev, &bus->devices, bus_list) {
5583 if (dev->subordinate)
5584 pci_bus_unlock(dev->subordinate);
5585 else
5586 pci_dev_unlock(dev);
5587 }
5588 pci_dev_unlock(bus->self);
5589 return 0;
5590 }
5591
5592 /* Do any devices on or below this slot prevent a bus reset? */
5593 static bool pci_slot_resettable(struct pci_slot *slot)
5594 {
5595 struct pci_dev *dev;
5596
5597 if (slot->bus->self &&
5598 (slot->bus->self->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET))
5599 return false;
5600
5601 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5602 if (!dev->slot || dev->slot != slot)
5603 continue;
5604 if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
5605 (dev->subordinate && !pci_bus_resettable(dev->subordinate)))
5606 return false;
5607 }
5608
5609 return true;
5610 }
5611
5612 /* Lock devices from the top of the tree down */
5613 static void pci_slot_lock(struct pci_slot *slot)
5614 {
5615 struct pci_dev *dev;
5616
5617 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5618 if (!dev->slot || dev->slot != slot)
5619 continue;
5620 if (dev->subordinate)
5621 pci_bus_lock(dev->subordinate);
5622 else
5623 pci_dev_lock(dev);
5624 }
5625 }
5626
5627 /* Unlock devices from the bottom of the tree up */
5628 static void pci_slot_unlock(struct pci_slot *slot)
5629 {
5630 struct pci_dev *dev;
5631
5632 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5633 if (!dev->slot || dev->slot != slot)
5634 continue;
5635 if (dev->subordinate)
5636 pci_bus_unlock(dev->subordinate);
5637 else pci_dev_unlock(dev);
5638 }
5639 }
5640
5641 /* Return 1 on successful lock, 0 on contention */
5642 static int pci_slot_trylock(struct pci_slot *slot)
5643 {
5644 struct pci_dev *dev;
5645
5646 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5647 if (!dev->slot || dev->slot != slot)
5648 continue;
5649 if (dev->subordinate) {
5650 /* pci_bus_trylock() unlocks all it locked, including bus->self */
5651 if (!pci_bus_trylock(dev->subordinate))
5652 goto unlock;
5654 } else if (!pci_dev_trylock(dev))
5655 goto unlock;
5656 }
5657 return 1;
5658
5659 unlock:
5660 list_for_each_entry_continue_reverse(dev,
5661 &slot->bus->devices, bus_list) {
5662 if (!dev->slot || dev->slot != slot)
5663 continue;
5664 if (dev->subordinate)
5665 pci_bus_unlock(dev->subordinate);
5666 else
5667 pci_dev_unlock(dev);
5668 }
5669 return 0;
5670 }
5671
5672 /*
5673 * Save and disable devices from the top of the tree down while holding
5674 * the @dev mutex lock for the entire tree.
5675 */
5676 static void pci_bus_save_and_disable_locked(struct pci_bus *bus)
5677 {
5678 struct pci_dev *dev;
5679
5680 list_for_each_entry(dev, &bus->devices, bus_list) {
5681 pci_dev_save_and_disable(dev);
5682 if (dev->subordinate)
5683 pci_bus_save_and_disable_locked(dev->subordinate);
5684 }
5685 }
5686
5687 /*
5688 * Restore devices from top of the tree down while holding @dev mutex lock
5689 * for the entire tree. Parent bridges need to be restored before we can
5690 * get to subordinate devices.
5691 */
5692 static void pci_bus_restore_locked(struct pci_bus *bus)
5693 {
5694 struct pci_dev *dev;
5695
5696 list_for_each_entry(dev, &bus->devices, bus_list) {
5697 pci_dev_restore(dev);
5698 if (dev->subordinate) {
5699 pci_bridge_wait_for_secondary_bus(dev, "bus reset");
5700 pci_bus_restore_locked(dev->subordinate);
5701 }
5702 }
5703 }
5704
5705 /*
5706 * Save and disable devices from the top of the tree down while holding
5707 * the @dev mutex lock for the entire tree.
5708 */
5709 static void pci_slot_save_and_disable_locked(struct pci_slot *slot)
5710 {
5711 struct pci_dev *dev;
5712
5713 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5714 if (!dev->slot || dev->slot != slot)
5715 continue;
5716 pci_dev_save_and_disable(dev);
5717 if (dev->subordinate)
5718 pci_bus_save_and_disable_locked(dev->subordinate);
5719 }
5720 }
5721
5722 /*
5723 * Restore devices from top of the tree down while holding @dev mutex lock
5724 * for the entire tree. Parent bridges need to be restored before we can
5725 * get to subordinate devices.
5726 */
5727 static void pci_slot_restore_locked(struct pci_slot *slot)
5728 {
5729 struct pci_dev *dev;
5730
5731 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5732 if (!dev->slot || dev->slot != slot)
5733 continue;
5734 pci_dev_restore(dev);
5735 if (dev->subordinate) {
5736 pci_bridge_wait_for_secondary_bus(dev, "slot reset");
5737 pci_bus_restore_locked(dev->subordinate);
5738 }
5739 }
5740 }
5741
5742 static int pci_slot_reset(struct pci_slot *slot, bool probe)
5743 {
5744 int rc;
5745
5746 if (!slot || !pci_slot_resettable(slot))
5747 return -ENOTTY;
5748
5749 if (!probe)
5750 pci_slot_lock(slot);
5751
5752 might_sleep();
5753
5754 rc = pci_reset_hotplug_slot(slot->hotplug, probe);
5755
5756 if (!probe)
5757 pci_slot_unlock(slot);
5758
5759 return rc;
5760 }
5761
5762 /**
5763 * pci_probe_reset_slot - probe whether a PCI slot can be reset
5764 * @slot: PCI slot to probe
5765 *
5766 * Return 0 if slot can be reset, negative if a slot reset is not supported.
5767 */
5768 int pci_probe_reset_slot(struct pci_slot *slot)
5769 {
5770 return pci_slot_reset(slot, PCI_RESET_PROBE);
5771 }
5772 EXPORT_SYMBOL_GPL(pci_probe_reset_slot);
5773
5774 /**
5775 * __pci_reset_slot - Try to reset a PCI slot
5776 * @slot: PCI slot to reset
5777 *
5778 * A PCI bus may host multiple slots, and each slot may support a reset
5779 * mechanism independent of other slots. For instance, some slots may support
5780 * slot power control. In the case of a 1:1 bus-to-slot architecture, this
5781 * function may wrap the bus reset to avoid spurious slot-related events such
5782 * as hotplug. Generally a slot reset should be attempted before a bus reset.
5783 * All of the functions of the slot and any subordinate buses behind the slot
5784 * are reset through this function. PCI config space of all devices in the
5785 * slot and behind the slot is saved before and restored after reset.
5786 *
5787 * Same as above, except return -EAGAIN if the slot cannot be locked.
5788 */
5789 static int __pci_reset_slot(struct pci_slot *slot)
5790 {
5791 int rc;
5792
5793 rc = pci_slot_reset(slot, PCI_RESET_PROBE);
5794 if (rc)
5795 return rc;
5796
5797 if (pci_slot_trylock(slot)) {
5798 pci_slot_save_and_disable_locked(slot);
5799 might_sleep();
5800 rc = pci_reset_hotplug_slot(slot->hotplug, PCI_RESET_DO_RESET);
5801 pci_slot_restore_locked(slot);
5802 pci_slot_unlock(slot);
5803 } else
5804 rc = -EAGAIN;
5805
5806 return rc;
5807 }
5808
5809 static int pci_bus_reset(struct pci_bus *bus, bool probe)
5810 {
5811 int ret;
5812
5813 if (!bus->self || !pci_bus_resettable(bus))
5814 return -ENOTTY;
5815
5816 if (probe)
5817 return 0;
5818
5819 pci_bus_lock(bus);
5820
5821 might_sleep();
5822
5823 ret = pci_bridge_secondary_bus_reset(bus->self);
5824
5825 pci_bus_unlock(bus);
5826
5827 return ret;
5828 }
5829
5830 /**
5831 * pci_bus_error_reset - reset the bridge's subordinate bus
5832 * @bridge: The parent device that connects to the bus to reset
5833 *
5834 * This function will first try to reset the slots on this bus if the method is
5835 * available. If slot reset fails or is not available, this will fall back to a
5836 * secondary bus reset.
5837 */
5838 int pci_bus_error_reset(struct pci_dev *bridge)
5839 {
5840 struct pci_bus *bus = bridge->subordinate;
5841 struct pci_slot *slot;
5842
5843 if (!bus)
5844 return -ENOTTY;
5845
5846 mutex_lock(&pci_slot_mutex);
5847 if (list_empty(&bus->slots))
5848 goto bus_reset;
5849
5850 list_for_each_entry(slot, &bus->slots, list)
5851 if (pci_probe_reset_slot(slot))
5852 goto bus_reset;
5853
5854 list_for_each_entry(slot, &bus->slots, list)
5855 if (pci_slot_reset(slot, PCI_RESET_DO_RESET))
5856 goto bus_reset;
5857
5858 mutex_unlock(&pci_slot_mutex);
5859 return 0;
5860 bus_reset:
5861 mutex_unlock(&pci_slot_mutex);
5862 return pci_bus_reset(bridge->subordinate, PCI_RESET_DO_RESET);
5863 }
5864
5865 /**
5866 * pci_probe_reset_bus - probe whether a PCI bus can be reset
5867 * @bus: PCI bus to probe
5868 *
5869 * Return 0 if bus can be reset, negative if a bus reset is not supported.
5870 */
5871 int pci_probe_reset_bus(struct pci_bus *bus)
5872 {
5873 return pci_bus_reset(bus, PCI_RESET_PROBE);
5874 }
5875 EXPORT_SYMBOL_GPL(pci_probe_reset_bus);
5876
5877 /**
5878 * __pci_reset_bus - Try to reset a PCI bus
5879 * @bus: top level PCI bus to reset
5880 *
5881 * Same as above, except return -EAGAIN if the bus cannot be locked.
5882 */
5883 static int __pci_reset_bus(struct pci_bus *bus)
5884 {
5885 int rc;
5886
5887 rc = pci_bus_reset(bus, PCI_RESET_PROBE);
5888 if (rc)
5889 return rc;
5890
5891 if (pci_bus_trylock(bus)) {
5892 pci_bus_save_and_disable_locked(bus);
5893 might_sleep();
5894 rc = pci_bridge_secondary_bus_reset(bus->self);
5895 pci_bus_restore_locked(bus);
5896 pci_bus_unlock(bus);
5897 } else
5898 rc = -EAGAIN;
5899
5900 return rc;
5901 }
5902
5903 /**
5904 * pci_reset_bus - Try to reset a PCI bus
5905 * @pdev: top level PCI device to reset via slot/bus
5906 *
5907 * Same as above, except return -EAGAIN if the bus cannot be locked.
5908 */
5909 int pci_reset_bus(struct pci_dev *pdev)
5910 {
5911 return (!pci_probe_reset_slot(pdev->slot)) ?
5912 __pci_reset_slot(pdev->slot) : __pci_reset_bus(pdev->bus);
5913 }
5914 EXPORT_SYMBOL_GPL(pci_reset_bus);
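/*
 * Illustrative sketch (hypothetical caller): pci_reset_bus() is typically
 * used when no function-level method exists and the caller owns every
 * function behind the bridge, e.g. for userspace device assignment:
 *
 *	if (pci_probe_reset_bus(pdev->bus) == 0)
 *		rc = pci_reset_bus(pdev);
 */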
5915
5916 /**
5917 * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count
5918 * @dev: PCI device to query
5919 *
5920 * Returns mmrbc: maximum designed memory read count in bytes or
5921 * appropriate error value.
5922 */
5923 int pcix_get_max_mmrbc(struct pci_dev *dev)
5924 {
5925 int cap;
5926 u32 stat;
5927
5928 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
5929 if (!cap)
5930 return -EINVAL;
5931
5932 if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
5933 return -EINVAL;
5934
5935 return 512 << FIELD_GET(PCI_X_STATUS_MAX_READ, stat);
5936 }
5937 EXPORT_SYMBOL(pcix_get_max_mmrbc);
5938
5939 /**
5940 * pcix_get_mmrbc - get PCI-X maximum memory read byte count
5941 * @dev: PCI device to query
5942 *
5943 * Returns mmrbc: maximum memory read count in bytes or appropriate error
5944 * value.
5945 */
5946 int pcix_get_mmrbc(struct pci_dev *dev)
5947 {
5948 int cap;
5949 u16 cmd;
5950
5951 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
5952 if (!cap)
5953 return -EINVAL;
5954
5955 if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
5956 return -EINVAL;
5957
5958 return 512 << FIELD_GET(PCI_X_CMD_MAX_READ, cmd);
5959 }
5960 EXPORT_SYMBOL(pcix_get_mmrbc);
5961
5962 /**
5963 * pcix_set_mmrbc - set PCI-X maximum memory read byte count
5964 * @dev: PCI device to set
5965 * @mmrbc: maximum memory read count in bytes
5966 * valid values are 512, 1024, 2048, 4096
5967 *
5968 * If possible, set the maximum memory read byte count; some bridges have
5969 * errata that prevent this.
5970 */
5971 int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
5972 {
5973 int cap;
5974 u32 stat, v, o;
5975 u16 cmd;
5976
5977 if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc))
5978 return -EINVAL;
5979
5980 v = ffs(mmrbc) - 10;
5981
5982 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
5983 if (!cap)
5984 return -EINVAL;
5985
5986 if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
5987 return -EINVAL;
5988
5989 if (v > FIELD_GET(PCI_X_STATUS_MAX_READ, stat))
5990 return -E2BIG;
5991
5992 if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
5993 return -EINVAL;
5994
5995 o = FIELD_GET(PCI_X_CMD_MAX_READ, cmd);
5996 if (o != v) {
5997 if (v > o && (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC))
5998 return -EIO;
5999
6000 cmd &= ~PCI_X_CMD_MAX_READ;
6001 cmd |= FIELD_PREP(PCI_X_CMD_MAX_READ, v);
6002 if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd))
6003 return -EIO;
6004 }
6005 return 0;
6006 }
6007 EXPORT_SYMBOL(pcix_set_mmrbc);
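/*
 * Worked example of the encoding above: the MMRBC field holds a 2-bit
 * code where 512 << code is the byte count, so mmrbc = 2048 encodes to
 * ffs(2048) - 10 = 12 - 10 = 2, and pcix_get_mmrbc() decodes it back as
 * 512 << 2 = 2048.
 */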
6008
6009 /**
6010 * pcie_get_readrq - get PCI Express read request size
6011 * @dev: PCI device to query
6012 *
6013 * Returns maximum memory read request in bytes or appropriate error value.
6014 */
6015 int pcie_get_readrq(struct pci_dev *dev)
6016 {
6017 u16 ctl;
6018
6019 pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
6020
6021 return 128 << FIELD_GET(PCI_EXP_DEVCTL_READRQ, ctl);
6022 }
6023 EXPORT_SYMBOL(pcie_get_readrq);
6024
6025 /**
6026 * pcie_set_readrq - set PCI Express maximum memory read request
6027 * @dev: PCI device to set
6028 * @rq: maximum memory read count in bytes
6029 * valid values are 128, 256, 512, 1024, 2048, 4096
6030 *
6031 * If possible, set the maximum memory read request size in bytes.
6032 */
6033 int pcie_set_readrq(struct pci_dev *dev, int rq)
6034 {
6035 u16 v;
6036 int ret;
6037 struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);
6038
6039 if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
6040 return -EINVAL;
6041
6042 /*
6043 * If using the "performance" PCIe config, we clamp the read rq
6044 * size to the max packet size to keep the host bridge from
6045 * generating requests larger than we can cope with.
6046 */
6047 if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
6048 int mps = pcie_get_mps(dev);
6049
6050 if (mps < rq)
6051 rq = mps;
6052 }
6053
6054 v = FIELD_PREP(PCI_EXP_DEVCTL_READRQ, ffs(rq) - 8);
6055
6056 if (bridge->no_inc_mrrs) {
6057 int max_mrrs = pcie_get_readrq(dev);
6058
6059 if (rq > max_mrrs) {
6060 pci_info(dev, "can't set Max_Read_Request_Size to %d; max is %d\n", rq, max_mrrs);
6061 return -EINVAL;
6062 }
6063 }
6064
6065 ret = pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
6066 PCI_EXP_DEVCTL_READRQ, v);
6067
6068 return pcibios_err_to_errno(ret);
6069 }
6070 EXPORT_SYMBOL(pcie_set_readrq);
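/*
 * Illustrative sketch (hypothetical values): a driver whose DMA engine
 * prefers smaller read requests might clamp MRRS after enabling the
 * device:
 *
 *	if (pcie_get_readrq(pdev) > 256)
 *		pcie_set_readrq(pdev, 256);
 *
 * The value must be a power of two between 128 and 4096 bytes.
 */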
6071
6072 /**
6073 * pcie_get_mps - get PCI Express maximum payload size
6074 * @dev: PCI device to query
6075 *
6076 * Returns maximum payload size in bytes
6077 */
6078 int pcie_get_mps(struct pci_dev *dev)
6079 {
6080 u16 ctl;
6081
6082 pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
6083
6084 return 128 << FIELD_GET(PCI_EXP_DEVCTL_PAYLOAD, ctl);
6085 }
6086 EXPORT_SYMBOL(pcie_get_mps);
6087
6088 /**
6089 * pcie_set_mps - set PCI Express maximum payload size
6090 * @dev: PCI device to set
6091 * @mps: maximum payload size in bytes
6092 * valid values are 128, 256, 512, 1024, 2048, 4096
6093 *
6094 * If possible, set the maximum payload size.
6095 */
6096 int pcie_set_mps(struct pci_dev *dev, int mps)
6097 {
6098 u16 v;
6099 int ret;
6100
6101 if (mps < 128 || mps > 4096 || !is_power_of_2(mps))
6102 return -EINVAL;
6103
6104 v = ffs(mps) - 8;
6105 if (v > dev->pcie_mpss)
6106 return -EINVAL;
6107 v = FIELD_PREP(PCI_EXP_DEVCTL_PAYLOAD, v);
6108
6109 ret = pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
6110 PCI_EXP_DEVCTL_PAYLOAD, v);
6111
6112 return pcibios_err_to_errno(ret);
6113 }
6114 EXPORT_SYMBOL(pcie_set_mps);
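/*
 * Worked example of the encoding above: the payload field holds a 3-bit
 * code where 128 << code is the size, so mps = 256 encodes to
 * ffs(256) - 8 = 9 - 8 = 1, and pcie_get_mps() decodes 128 << 1 = 256.
 * Values whose code exceeds dev->pcie_mpss are rejected.
 */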
6115
6116 static enum pci_bus_speed to_pcie_link_speed(u16 lnksta)
6117 {
6118 return pcie_link_speed[FIELD_GET(PCI_EXP_LNKSTA_CLS, lnksta)];
6119 }
6120
6121 int pcie_link_speed_mbps(struct pci_dev *pdev)
6122 {
6123 u16 lnksta;
6124 int err;
6125
6126 err = pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnksta);
6127 if (err)
6128 return err;
6129
6130 return pcie_dev_speed_mbps(to_pcie_link_speed(lnksta));
6131 }
6132 EXPORT_SYMBOL(pcie_link_speed_mbps);
6133
6134 /**
6135 * pcie_bandwidth_available - determine minimum link settings of a PCIe
6136 * device and its bandwidth limitation
6137 * @dev: PCI device to query
6138 * @limiting_dev: storage for device causing the bandwidth limitation
6139 * @speed: storage for speed of limiting device
6140 * @width: storage for width of limiting device
6141 *
6142 * Walk up the PCI device chain and find the point where the minimum
6143 * bandwidth is available. Return the bandwidth available there and (if
6144 * limiting_dev, speed, and width pointers are supplied) information about
6145 * that point. The bandwidth returned is in Mb/s, i.e., megabits/second of
6146 * raw bandwidth.
6147 */
6148 u32 pcie_bandwidth_available(struct pci_dev *dev, struct pci_dev **limiting_dev,
6149 enum pci_bus_speed *speed,
6150 enum pcie_link_width *width)
6151 {
6152 u16 lnksta;
6153 enum pci_bus_speed next_speed;
6154 enum pcie_link_width next_width;
6155 u32 bw, next_bw;
6156
6157 if (speed)
6158 *speed = PCI_SPEED_UNKNOWN;
6159 if (width)
6160 *width = PCIE_LNK_WIDTH_UNKNOWN;
6161
6162 bw = 0;
6163
6164 while (dev) {
6165 pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
6166
6167 next_speed = to_pcie_link_speed(lnksta);
6168 next_width = FIELD_GET(PCI_EXP_LNKSTA_NLW, lnksta);
6169
6170 next_bw = next_width * PCIE_SPEED2MBS_ENC(next_speed);
6171
6172 /* Check if current device limits the total bandwidth */
6173 if (!bw || next_bw <= bw) {
6174 bw = next_bw;
6175
6176 if (limiting_dev)
6177 *limiting_dev = dev;
6178 if (speed)
6179 *speed = next_speed;
6180 if (width)
6181 *width = next_width;
6182 }
6183
6184 dev = pci_upstream_bridge(dev);
6185 }
6186
6187 return bw;
6188 }
6189 EXPORT_SYMBOL(pcie_bandwidth_available);
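/*
 * Worked example (assuming the PCIE_SPEED2MBS_ENC() constants in pci.h):
 * a device capable of 8 GT/s x8 behind a switch whose upstream link runs
 * at 8 GT/s x4 is limited by the x4 link, so this returns
 * 4 * 7877 Mb/s = 31508 Mb/s (7877 being 8 GT/s with 128b/130b encoding
 * overhead applied).
 */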
6190
6191 /**
6192 * pcie_get_speed_cap - query for the PCI device's link speed capability
6193 * @dev: PCI device to query
6194 *
6195 * Query the PCI device speed capability. Return the maximum link speed
6196 * supported by the device.
6197 */
6198 enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev)
6199 {
6200 u32 lnkcap2, lnkcap;
6201
6202 /*
6203 * Link Capabilities 2 was added in PCIe r3.0, sec 7.8.18. The
6204 * implementation note there recommends using the Supported Link
6205 * Speeds Vector in Link Capabilities 2 when supported.
6206 *
6207 * Without Link Capabilities 2, i.e., prior to PCIe r3.0, software
6208 * should use the Supported Link Speeds field in Link Capabilities,
6209 * where only 2.5 GT/s and 5.0 GT/s speeds were defined.
6210 */
6211 pcie_capability_read_dword(dev, PCI_EXP_LNKCAP2, &lnkcap2);
6212
6213 /* PCIe r3.0-compliant */
6214 if (lnkcap2)
6215 return PCIE_LNKCAP2_SLS2SPEED(lnkcap2);
6216
6217 pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
6218 if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_5_0GB)
6219 return PCIE_SPEED_5_0GT;
6220 else if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_2_5GB)
6221 return PCIE_SPEED_2_5GT;
6222
6223 return PCI_SPEED_UNKNOWN;
6224 }
6225 EXPORT_SYMBOL(pcie_get_speed_cap);
6226
6227 /**
6228 * pcie_get_width_cap - query for the PCI device's link width capability
6229 * @dev: PCI device to query
6230 *
6231 * Query the PCI device width capability. Return the maximum link width
6232 * supported by the device.
6233 */
6234 enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev)
6235 {
6236 u32 lnkcap;
6237
6238 pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
6239 if (lnkcap)
6240 return FIELD_GET(PCI_EXP_LNKCAP_MLW, lnkcap);
6241
6242 return PCIE_LNK_WIDTH_UNKNOWN;
6243 }
6244 EXPORT_SYMBOL(pcie_get_width_cap);
6245
6246 /**
6247 * pcie_bandwidth_capable - calculate a PCI device's link bandwidth capability
6248 * @dev: PCI device
6249 * @speed: storage for link speed
6250 * @width: storage for link width
6251 *
6252 * Calculate a PCI device's link bandwidth by querying for its link speed
6253 * and width, multiplying them, and applying encoding overhead. The result
6254 * is in Mb/s, i.e., megabits/second of raw bandwidth.
6255 */
6256 static u32 pcie_bandwidth_capable(struct pci_dev *dev,
6257 enum pci_bus_speed *speed,
6258 enum pcie_link_width *width)
6259 {
6260 *speed = pcie_get_speed_cap(dev);
6261 *width = pcie_get_width_cap(dev);
6262
6263 if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN)
6264 return 0;
6265
6266 return *width * PCIE_SPEED2MBS_ENC(*speed);
6267 }
6268
6269 /**
6270 * __pcie_print_link_status - Report the PCI device's link speed and width
6271 * @dev: PCI device to query
6272 * @verbose: Print info even when enough bandwidth is available
6273 *
6274 * If the available bandwidth at the device is less than the device is
6275 * capable of, report the device's maximum possible bandwidth and the
6276 * upstream link that limits its performance. If @verbose, always print
6277 * the available bandwidth, even if the device isn't constrained.
6278 */
6279 void __pcie_print_link_status(struct pci_dev *dev, bool verbose)
6280 {
6281 enum pcie_link_width width, width_cap;
6282 enum pci_bus_speed speed, speed_cap;
6283 struct pci_dev *limiting_dev = NULL;
6284 u32 bw_avail, bw_cap;
6285
6286 bw_cap = pcie_bandwidth_capable(dev, &speed_cap, &width_cap);
6287 bw_avail = pcie_bandwidth_available(dev, &limiting_dev, &speed, &width);
6288
6289 if (bw_avail >= bw_cap && verbose)
6290 pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth (%s x%d link)\n",
6291 bw_cap / 1000, bw_cap % 1000,
6292 pci_speed_string(speed_cap), width_cap);
6293 else if (bw_avail < bw_cap)
6294 pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth, limited by %s x%d link at %s (capable of %u.%03u Gb/s with %s x%d link)\n",
6295 bw_avail / 1000, bw_avail % 1000,
6296 pci_speed_string(speed), width,
6297 limiting_dev ? pci_name(limiting_dev) : "<unknown>",
6298 bw_cap / 1000, bw_cap % 1000,
6299 pci_speed_string(speed_cap), width_cap);
6300 }
6301
6302 /**
6303 * pcie_print_link_status - Report the PCI device's link speed and width
6304 * @dev: PCI device to query
6305 *
6306 * Report the available bandwidth at the device.
6307 */
6308 void pcie_print_link_status(struct pci_dev *dev)
6309 {
6310 __pcie_print_link_status(dev, true);
6311 }
6312 EXPORT_SYMBOL(pcie_print_link_status);
6313
6314 /**
6315 * pci_select_bars - Make BAR mask from the type of resource
6316 * @dev: the PCI device for which BAR mask is made
6317 * @flags: resource type mask to be selected
6318 *
6319 * This helper routine makes a BAR mask from the given resource type.
6320 */
6321 int pci_select_bars(struct pci_dev *dev, unsigned long flags)
6322 {
6323 int i, bars = 0;
6324 for (i = 0; i < PCI_NUM_RESOURCES; i++)
6325 if (pci_resource_flags(dev, i) & flags)
6326 bars |= (1 << i);
6327 return bars;
6328 }
6329 EXPORT_SYMBOL(pci_select_bars);
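/*
 * Illustrative sketch (hypothetical driver name): pci_select_bars()
 * pairs naturally with pci_request_selected_regions(), e.g. to claim
 * only the memory BARs of a device:
 *
 *	int bars = pci_select_bars(pdev, IORESOURCE_MEM);
 *	int rc = pci_request_selected_regions(pdev, bars, "mydrv");
 */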
6330
6331 /* Some architectures require additional programming to enable VGA */
6332 static arch_set_vga_state_t arch_set_vga_state;
6333
6334 void __init pci_register_set_vga_state(arch_set_vga_state_t func)
6335 {
6336 arch_set_vga_state = func; /* NULL disables */
6337 }
6338
6339 static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
6340 unsigned int command_bits, u32 flags)
6341 {
6342 if (arch_set_vga_state)
6343 return arch_set_vga_state(dev, decode, command_bits,
6344 flags);
6345 return 0;
6346 }
6347
6348 /**
6349 * pci_set_vga_state - set VGA decode state on device and parents if requested
6350 * @dev: the PCI device
6351 * @decode: true = enable decoding, false = disable decoding
6352 * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY
6353 * @flags: PCI_VGA_STATE_CHANGE_DECODES and/or PCI_VGA_STATE_CHANGE_BRIDGE
6355 */
6356 int pci_set_vga_state(struct pci_dev *dev, bool decode,
6357 unsigned int command_bits, u32 flags)
6358 {
6359 struct pci_bus *bus;
6360 struct pci_dev *bridge;
6361 u16 cmd;
6362 int rc;
6363
6364 WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) && (command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY)));
6365
6366 /* ARCH specific VGA enables */
6367 rc = pci_set_vga_state_arch(dev, decode, command_bits, flags);
6368 if (rc)
6369 return rc;
6370
6371 if (flags & PCI_VGA_STATE_CHANGE_DECODES) {
6372 pci_read_config_word(dev, PCI_COMMAND, &cmd);
6373 if (decode)
6374 cmd |= command_bits;
6375 else
6376 cmd &= ~command_bits;
6377 pci_write_config_word(dev, PCI_COMMAND, cmd);
6378 }
6379
6380 if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE))
6381 return 0;
6382
6383 bus = dev->bus;
6384 while (bus) {
6385 bridge = bus->self;
6386 if (bridge) {
6387 pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
6388 &cmd);
6389 if (decode)
6390 cmd |= PCI_BRIDGE_CTL_VGA;
6391 else
6392 cmd &= ~PCI_BRIDGE_CTL_VGA;
6393 pci_write_config_word(bridge, PCI_BRIDGE_CONTROL,
6394 cmd);
6395 }
6396 bus = bus->parent;
6397 }
6398 return 0;
6399 }
6400
6401 #ifdef CONFIG_ACPI
6402 bool pci_pr3_present(struct pci_dev *pdev)
6403 {
6404 struct acpi_device *adev;
6405
6406 if (acpi_disabled)
6407 return false;
6408
6409 adev = ACPI_COMPANION(&pdev->dev);
6410 if (!adev)
6411 return false;
6412
6413 return adev->power.flags.power_resources &&
6414 acpi_has_method(adev->handle, "_PR3");
6415 }
6416 EXPORT_SYMBOL_GPL(pci_pr3_present);
6417 #endif
6418
6419 /**
6420 * pci_add_dma_alias - Add a DMA devfn alias for a device
6421 * @dev: the PCI device for which alias is added
6422 * @devfn_from: alias slot and function
6423 * @nr_devfns: number of subsequent devfns to alias
6424 *
6425 * This helper encodes an 8-bit devfn as a bit number in dma_alias_mask
6426 * which is used to program permissible bus-devfn source addresses for DMA
6427 * requests in an IOMMU. These aliases factor into IOMMU group creation
6428 * and are useful for devices generating DMA requests beyond or different
6429 * from their logical bus-devfn. Examples include device quirks where the
6430 * device simply uses the wrong devfn, as well as non-transparent bridges
6431 * where the alias may be a proxy for devices in another domain.
6432 *
6433 * IOMMU group creation is performed during device discovery or addition,
6434 * prior to any potential DMA mapping and therefore prior to driver probing
6435 * (especially for userspace assigned devices where IOMMU group definition
6436 * cannot be left as a userspace activity). DMA aliases should therefore
6437 * be configured via quirks, such as the PCI fixup header quirk.
6438 */
6439 void pci_add_dma_alias(struct pci_dev *dev, u8 devfn_from,
6440 unsigned int nr_devfns)
6441 {
6442 int devfn_to;
6443
6444 nr_devfns = min(nr_devfns, (unsigned int)MAX_NR_DEVFNS - devfn_from);
6445 devfn_to = devfn_from + nr_devfns - 1;
6446
6447 if (!dev->dma_alias_mask)
6448 dev->dma_alias_mask = bitmap_zalloc(MAX_NR_DEVFNS, GFP_KERNEL);
6449 if (!dev->dma_alias_mask) {
6450 pci_warn(dev, "Unable to allocate DMA alias mask\n");
6451 return;
6452 }
6453
6454 bitmap_set(dev->dma_alias_mask, devfn_from, nr_devfns);
6455
6456 if (nr_devfns == 1)
6457 pci_info(dev, "Enabling fixed DMA alias to %02x.%d\n",
6458 PCI_SLOT(devfn_from), PCI_FUNC(devfn_from));
6459 else if (nr_devfns > 1)
6460 pci_info(dev, "Enabling fixed DMA alias for devfn range from %02x.%d to %02x.%d\n",
6461 PCI_SLOT(devfn_from), PCI_FUNC(devfn_from),
6462 PCI_SLOT(devfn_to), PCI_FUNC(devfn_to));
6463 }
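/*
 * Illustrative sketch (hypothetical vendor/device IDs): aliases are
 * normally added from a header fixup quirk so they exist before IOMMU
 * groups are built, e.g. in drivers/pci/quirks.c:
 */
#if 0
static void quirk_mydev_dma_alias(struct pci_dev *dev)	/* hypothetical */
{
	/* The device also emits DMA as function 1 of its slot */
	pci_add_dma_alias(dev, PCI_DEVFN(PCI_SLOT(dev->devfn), 1), 1);
}
DECLARE_PCI_FIXUP_HEADER(0x1234, 0x5678, quirk_mydev_dma_alias);
#endif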
6464
6465 bool pci_devs_are_dma_aliases(struct pci_dev *dev1, struct pci_dev *dev2)
6466 {
6467 return (dev1->dma_alias_mask &&
6468 test_bit(dev2->devfn, dev1->dma_alias_mask)) ||
6469 (dev2->dma_alias_mask &&
6470 test_bit(dev1->devfn, dev2->dma_alias_mask)) ||
6471 pci_real_dma_dev(dev1) == dev2 ||
6472 pci_real_dma_dev(dev2) == dev1;
6473 }
6474
6475 bool pci_device_is_present(struct pci_dev *pdev)
6476 {
6477 u32 v;
6478
6479 /* Check PF if pdev is a VF, since VF Vendor/Device IDs are 0xffff */
6480 pdev = pci_physfn(pdev);
6481 if (pci_dev_is_disconnected(pdev))
6482 return false;
6483 return pci_bus_read_dev_vendor_id(pdev->bus, pdev->devfn, &v, 0);
6484 }
6485 EXPORT_SYMBOL_GPL(pci_device_is_present);
6486
6487 void pci_ignore_hotplug(struct pci_dev *dev)
6488 {
6489 struct pci_dev *bridge = dev->bus->self;
6490
6491 dev->ignore_hotplug = 1;
6492 /* Propagate the "ignore hotplug" setting to the parent bridge. */
6493 if (bridge)
6494 bridge->ignore_hotplug = 1;
6495 }
6496 EXPORT_SYMBOL_GPL(pci_ignore_hotplug);
6497
6498 /**
6499 * pci_real_dma_dev - Get PCI DMA device for PCI device
6500 * @dev: the PCI device that may have a PCI DMA alias
6501 *
6502 * Permits the platform to provide architecture-specific functionality to
6503 * devices needing to alias DMA to another PCI device on another PCI bus. If
6504 * the PCI device is on the same bus, it is recommended to use
6505 * pci_add_dma_alias(). This is the default implementation. Architecture
6506 * implementations can override this.
6507 */
6508 struct pci_dev __weak *pci_real_dma_dev(struct pci_dev *dev)
6509 {
6510 return dev;
6511 }
6512
6513 resource_size_t __weak pcibios_default_alignment(void)
6514 {
6515 return 0;
6516 }
6517
6518 /*
6519 * Arches that don't want to expose struct resource to userland as-is in
6520 * sysfs and /proc can implement their own pci_resource_to_user().
6521 */
6522 void __weak pci_resource_to_user(const struct pci_dev *dev, int bar,
6523 const struct resource *rsrc,
6524 resource_size_t *start, resource_size_t *end)
6525 {
6526 *start = rsrc->start;
6527 *end = rsrc->end;
6528 }
6529
6530 static char *resource_alignment_param;
6531 static DEFINE_SPINLOCK(resource_alignment_lock);
6532
6533 /**
6534 * pci_specified_resource_alignment - get resource alignment specified by user.
6535 * @dev: the PCI device to query
6536 * @resize: whether or not to change resources' size when reassigning alignment
6537 *
6538 * RETURNS: Resource alignment if it is specified.
6539 * Zero if it is not specified.
6540 */
6541 static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev,
6542 bool *resize)
6543 {
6544 int align_order, count;
6545 resource_size_t align = pcibios_default_alignment();
6546 const char *p;
6547 int ret;
6548
6549 spin_lock(&resource_alignment_lock);
6550 p = resource_alignment_param;
6551 if (!p || !*p)
6552 goto out;
6553 if (pci_has_flag(PCI_PROBE_ONLY)) {
6554 align = 0;
6555 pr_info_once("PCI: Ignoring requested alignments (PCI_PROBE_ONLY)\n");
6556 goto out;
6557 }
6558
6559 while (*p) {
6560 count = 0;
6561 if (sscanf(p, "%d%n", &align_order, &count) == 1 &&
6562 p[count] == '@') {
6563 p += count + 1;
6564 if (align_order > 63) {
6565 pr_err("PCI: Invalid requested alignment (order %d)\n",
6566 align_order);
6567 align_order = PAGE_SHIFT;
6568 }
6569 } else {
6570 align_order = PAGE_SHIFT;
6571 }
6572
6573 ret = pci_dev_str_match(dev, p, &p);
6574 if (ret == 1) {
6575 *resize = true;
6576 align = 1ULL << align_order;
6577 break;
6578 } else if (ret < 0) {
6579 pr_err("PCI: Can't parse resource_alignment parameter: %s\n",
6580 p);
6581 break;
6582 }
6583
6584 if (*p != ';' && *p != ',') {
6585 /* End of param or invalid format */
6586 break;
6587 }
6588 p++;
6589 }
6590 out:
6591 spin_unlock(&resource_alignment_lock);
6592 return align;
6593 }
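/*
 * Example (illustrative devices): the parameter parsed above is a list
 * of "[<order>@]<device>" specifiers separated by ';' or ',', where
 * <device> uses the pci_dev_str_match() syntax, e.g.:
 *
 *   pci=resource_alignment=20@pci:8086:9c22;pci:10de:1c82
 *
 * requests 2^20 (1 MiB) alignment for the first device and the default
 * PAGE_SHIFT alignment for the second.
 */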
6594
6595 static void pci_request_resource_alignment(struct pci_dev *dev, int bar,
6596 resource_size_t align, bool resize)
6597 {
6598 struct resource *r = &dev->resource[bar];
6599 const char *r_name = pci_resource_name(dev, bar);
6600 resource_size_t size;
6601
6602 if (!(r->flags & IORESOURCE_MEM))
6603 return;
6604
6605 if (r->flags & IORESOURCE_PCI_FIXED) {
6606 pci_info(dev, "%s %pR: ignoring requested alignment %#llx\n",
6607 r_name, r, (unsigned long long)align);
6608 return;
6609 }
6610
6611 size = resource_size(r);
6612 if (size >= align)
6613 return;
6614
6615 /*
6616 * Increase the alignment of the resource. There are two ways we
6617 * can do this:
6618 *
6619 * 1) Increase the size of the resource. BARs are aligned on their
6620 * size, so when we reallocate space for this resource, we'll
6621 * allocate it with the larger alignment. This also prevents
6622 * assignment of any other BARs inside the alignment region, so
6623 * if we're requesting page alignment, this means no other BARs
6624 * will share the page.
6625 *
6626 * The disadvantage is that this makes the resource larger than
6627 * the hardware BAR, which may break drivers that compute things
6628 * based on the resource size, e.g., to find registers at a
6629 * fixed offset before the end of the BAR.
6630 *
6631 * 2) Retain the resource size, but use IORESOURCE_STARTALIGN and
6632 * set r->start to the desired alignment. By itself this
6633 * doesn't prevent other BARs being put inside the alignment
6634 * region, but if we realign *every* resource of every device in
6635 * the system, none of them will share an alignment region.
6636 *
6637 * When the user has requested alignment for only some devices via
6638 * the "pci=resource_alignment" argument, "resize" is true and we
6639 * use the first method. Otherwise we assume we're aligning all
6640 * devices and we use the second.
6641 */
6642
6643 pci_info(dev, "%s %pR: requesting alignment to %#llx\n",
6644 r_name, r, (unsigned long long)align);
6645
6646 if (resize) {
6647 r->start = 0;
6648 r->end = align - 1;
6649 } else {
6650 r->flags &= ~IORESOURCE_SIZEALIGN;
6651 r->flags |= IORESOURCE_STARTALIGN;
6652 r->start = align;
6653 r->end = r->start + size - 1;
6654 }
6655 r->flags |= IORESOURCE_UNSET;
6656 }
6657
6658 /*
6659 * This function disables memory decoding and releases the memory resources
6660 * of the device specified by the kernel's boot parameter 'pci=resource_alignment='.
6661 * It also rounds up the size to the specified alignment.
6662 * Later on, the kernel will reassign suitably aligned memory resources back
6663 * to the device.
6664 */
6665 void pci_reassigndev_resource_alignment(struct pci_dev *dev)
6666 {
6667 int i;
6668 struct resource *r;
6669 resource_size_t align;
6670 u16 command;
6671 bool resize = false;
6672
6673 /*
6674 * VF BARs are read-only zero according to SR-IOV spec r1.1, sec
6675 * 3.4.1.11. Their resources are allocated from the space
6676 * described by the VF BARx register in the PF's SR-IOV capability.
6677 * We can't influence their alignment here.
6678 */
6679 if (dev->is_virtfn)
6680 return;
6681
6682 /* check if specified PCI is target device to reassign */
6683 align = pci_specified_resource_alignment(dev, &resize);
6684 if (!align)
6685 return;
6686
6687 if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
6688 (dev->class >> 8) == PCI_CLASS_BRIDGE_HOST) {
6689 pci_warn(dev, "Can't reassign resources to host bridge\n");
6690 return;
6691 }
6692
6693 pci_read_config_word(dev, PCI_COMMAND, &command);
6694 command &= ~PCI_COMMAND_MEMORY;
6695 pci_write_config_word(dev, PCI_COMMAND, command);
6696
6697 for (i = 0; i <= PCI_ROM_RESOURCE; i++)
6698 pci_request_resource_alignment(dev, i, align, resize);
6699
6700 /*
6701 * Disable the bridge's resource windows so that the kernel
6702 * can reassign new windows later on.
6704 */
6705 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
6706 for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
6707 r = &dev->resource[i];
6708 if (!(r->flags & IORESOURCE_MEM))
6709 continue;
6710 r->flags |= IORESOURCE_UNSET;
6711 r->end = resource_size(r) - 1;
6712 r->start = 0;
6713 }
6714 pci_disable_bridge_window(dev);
6715 }
6716 }
6717
6718 static ssize_t resource_alignment_show(const struct bus_type *bus, char *buf)
6719 {
6720 size_t count = 0;
6721
6722 spin_lock(&resource_alignment_lock);
6723 if (resource_alignment_param)
6724 count = sysfs_emit(buf, "%s\n", resource_alignment_param);
6725 spin_unlock(&resource_alignment_lock);
6726
6727 return count;
6728 }
6729
6730 static ssize_t resource_alignment_store(const struct bus_type *bus,
6731 const char *buf, size_t count)
6732 {
6733 char *param, *old, *end;
6734
6735 if (count >= (PAGE_SIZE - 1))
6736 return -EINVAL;
6737
6738 param = kstrndup(buf, count, GFP_KERNEL);
6739 if (!param)
6740 return -ENOMEM;
6741
6742 end = strchr(param, '\n');
6743 if (end)
6744 *end = '\0';
6745
6746 spin_lock(&resource_alignment_lock);
6747 old = resource_alignment_param;
6748 if (strlen(param)) {
6749 resource_alignment_param = param;
6750 } else {
6751 kfree(param);
6752 resource_alignment_param = NULL;
6753 }
6754 spin_unlock(&resource_alignment_lock);
6755
6756 kfree(old);
6757
6758 return count;
6759 }
6760
6761 static BUS_ATTR_RW(resource_alignment);
6762
6763 static int __init pci_resource_alignment_sysfs_init(void)
6764 {
6765 return bus_create_file(&pci_bus_type,
6766 &bus_attr_resource_alignment);
6767 }
6768 late_initcall(pci_resource_alignment_sysfs_init);
6769
6770 static void pci_no_domains(void)
6771 {
6772 #ifdef CONFIG_PCI_DOMAINS
6773 pci_domains_supported = 0;
6774 #endif
6775 }
6776
6777 #ifdef CONFIG_PCI_DOMAINS_GENERIC
6778 static DEFINE_IDA(pci_domain_nr_static_ida);
6779 static DEFINE_IDA(pci_domain_nr_dynamic_ida);
6780
6781 static void of_pci_reserve_static_domain_nr(void)
6782 {
6783 struct device_node *np;
6784 int domain_nr;
6785
6786 for_each_node_by_type(np, "pci") {
6787 domain_nr = of_get_pci_domain_nr(np);
6788 if (domain_nr < 0)
6789 continue;
6790 /*
6791 * Permanently allocate domain_nr in dynamic_ida
6792 * to prevent it from being allocated dynamically.
6793 */
6794 ida_alloc_range(&pci_domain_nr_dynamic_ida,
6795 domain_nr, domain_nr, GFP_KERNEL);
6796 }
6797 }
6798
6799 static int of_pci_bus_find_domain_nr(struct device *parent)
6800 {
6801 static bool static_domains_reserved = false;
6802 int domain_nr;
6803
6804 /* On the first call scan device tree for static allocations. */
6805 if (!static_domains_reserved) {
6806 of_pci_reserve_static_domain_nr();
6807 static_domains_reserved = true;
6808 }
6809
6810 if (parent) {
6811 /*
6812 * If the domain is in DT, allocate it in the static IDA. This
6813 * prevents duplicate static allocations in case of errors
6814 * in DT.
6815 */
6816 domain_nr = of_get_pci_domain_nr(parent->of_node);
6817 if (domain_nr >= 0)
6818 return ida_alloc_range(&pci_domain_nr_static_ida,
6819 domain_nr, domain_nr,
6820 GFP_KERNEL);
6821 }
6822
6823 /*
6824 * If the domain was not specified in DT, choose a free ID from the
6825 * dynamic allocations. All domain numbers from DT are permanently
6826 * reserved in the dynamic IDA so they cannot be assigned to other
6827 * DT nodes that lack a static domain.
6828 */
6829 return ida_alloc(&pci_domain_nr_dynamic_ida, GFP_KERNEL);
6830 }
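/*
 * Example (illustrative DT fragment): a static domain number comes from
 * the "linux,pci-domain" property that of_get_pci_domain_nr() reads:
 *
 *	pcie@40000000 {
 *		...
 *		linux,pci-domain = <2>;
 *	};
 */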
6831
6832 static void of_pci_bus_release_domain_nr(struct device *parent, int domain_nr)
6833 {
6834 if (domain_nr < 0)
6835 return;
6836
6837 /* Release domain from IDA where it was allocated. */
6838 if (of_get_pci_domain_nr(parent->of_node) == domain_nr)
6839 ida_free(&pci_domain_nr_static_ida, domain_nr);
6840 else
6841 ida_free(&pci_domain_nr_dynamic_ida, domain_nr);
6842 }
6843
6844 int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent)
6845 {
6846 return acpi_disabled ? of_pci_bus_find_domain_nr(parent) :
6847 acpi_pci_bus_find_domain_nr(bus);
6848 }
6849
6850 void pci_bus_release_domain_nr(struct device *parent, int domain_nr)
6851 {
6852 if (!acpi_disabled)
6853 return;
6854 of_pci_bus_release_domain_nr(parent, domain_nr);
6855 }
6856 #endif
6857
6858 /**
6859 * pci_ext_cfg_avail - can we access extended PCI config space?
6860 *
6861 * Returns 1 if we can access PCI extended config space (offsets
6862 * greater than 0xff). This is the default implementation. Architecture
6863 * implementations can override this.
6864 */
6865 int __weak pci_ext_cfg_avail(void)
6866 {
6867 return 1;
6868 }
6869
6870 void __weak pci_fixup_cardbus(struct pci_bus *bus)
6871 {
6872 }
6873 EXPORT_SYMBOL(pci_fixup_cardbus);
6874
6875 static int __init pci_setup(char *str)
6876 {
6877 while (str) {
6878 char *k = strchr(str, ',');
6879 if (k)
6880 *k++ = 0;
6881 if (*str && (str = pcibios_setup(str)) && *str) {
6882 if (!strcmp(str, "nomsi")) {
6883 pci_no_msi();
6884 } else if (!strncmp(str, "noats", 5)) {
6885 pr_info("PCIe: ATS is disabled\n");
6886 pcie_ats_disabled = true;
6887 } else if (!strcmp(str, "noaer")) {
6888 pci_no_aer();
6889 } else if (!strcmp(str, "earlydump")) {
6890 pci_early_dump = true;
6891 } else if (!strncmp(str, "realloc=", 8)) {
6892 pci_realloc_get_opt(str + 8);
6893 } else if (!strncmp(str, "realloc", 7)) {
6894 pci_realloc_get_opt("on");
6895 } else if (!strcmp(str, "nodomains")) {
6896 pci_no_domains();
6897 } else if (!strncmp(str, "noari", 5)) {
6898 pcie_ari_disabled = true;
6899 } else if (!strncmp(str, "cbiosize=", 9)) {
6900 pci_cardbus_io_size = memparse(str + 9, &str);
6901 } else if (!strncmp(str, "cbmemsize=", 10)) {
6902 pci_cardbus_mem_size = memparse(str + 10, &str);
6903 } else if (!strncmp(str, "resource_alignment=", 19)) {
6904 resource_alignment_param = str + 19;
6905 } else if (!strncmp(str, "ecrc=", 5)) {
6906 pcie_ecrc_get_policy(str + 5);
6907 } else if (!strncmp(str, "hpiosize=", 9)) {
6908 pci_hotplug_io_size = memparse(str + 9, &str);
6909 } else if (!strncmp(str, "hpmmiosize=", 11)) {
6910 pci_hotplug_mmio_size = memparse(str + 11, &str);
6911 } else if (!strncmp(str, "hpmmioprefsize=", 15)) {
6912 pci_hotplug_mmio_pref_size = memparse(str + 15, &str);
6913 } else if (!strncmp(str, "hpmemsize=", 10)) {
6914 pci_hotplug_mmio_size = memparse(str + 10, &str);
6915 pci_hotplug_mmio_pref_size = pci_hotplug_mmio_size;
6916 } else if (!strncmp(str, "hpbussize=", 10)) {
6917 pci_hotplug_bus_size =
6918 simple_strtoul(str + 10, &str, 0);
6919 if (pci_hotplug_bus_size > 0xff)
6920 pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE;
6921 } else if (!strncmp(str, "pcie_bus_tune_off", 17)) {
6922 pcie_bus_config = PCIE_BUS_TUNE_OFF;
6923 } else if (!strncmp(str, "pcie_bus_safe", 13)) {
6924 pcie_bus_config = PCIE_BUS_SAFE;
6925 } else if (!strncmp(str, "pcie_bus_perf", 13)) {
6926 pcie_bus_config = PCIE_BUS_PERFORMANCE;
6927 } else if (!strncmp(str, "pcie_bus_peer2peer", 18)) {
6928 pcie_bus_config = PCIE_BUS_PEER2PEER;
6929 } else if (!strncmp(str, "pcie_scan_all", 13)) {
6930 pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
6931 } else if (!strncmp(str, "disable_acs_redir=", 18)) {
6932 disable_acs_redir_param = str + 18;
6933 } else if (!strncmp(str, "config_acs=", 11)) {
6934 config_acs_param = str + 11;
6935 } else {
6936 pr_err("PCI: Unknown option `%s'\n", str);
6937 }
6938 }
6939 str = k;
6940 }
6941 return 0;
6942 }
6943 early_param("pci", pci_setup);
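/*
 * Example (illustrative): several of the options above combined on the
 * kernel command line:
 *
 *   pci=nomsi,hpmemsize=128M,pcie_bus_safe
 *
 * Options are comma-separated; unknown ones are logged with pr_err() and
 * otherwise ignored.
 */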
6944
6945 /*
6946 * 'resource_alignment_param', 'disable_acs_redir_param' and 'config_acs_param'
6947 * are initialized in pci_setup(), above, to point to data in the __initdata
6948 * section which will be freed after the init sequence is complete. We can't
6949 * allocate memory in pci_setup() because some architectures do not have any
6950 * memory allocation service available during an early_param() call. So we
6951 * allocate memory and copy the variables here before the init section is
6952 * freed.
6953 */
6954 static int __init pci_realloc_setup_params(void)
6955 {
6956 resource_alignment_param = kstrdup(resource_alignment_param,
6957 GFP_KERNEL);
6958 disable_acs_redir_param = kstrdup(disable_acs_redir_param, GFP_KERNEL);
6959 config_acs_param = kstrdup(config_acs_param, GFP_KERNEL);
6960
6961 return 0;
6962 }
6963 pure_initcall(pci_realloc_setup_params);
6964