// SPDX-License-Identifier: GPL-2.0
/*
 * PCI Bus Services, see include/linux/pci.h for further explanation.
 *
 * Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
 * David Mosberger-Tang
 *
 * Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz>
 */

#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/dmi.h>
#include <linux/init.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/log2.h>
#include <linux/logic_pio.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <linux/pci_hotplug.h>
#include <linux/vmalloc.h>
#include <asm/dma.h>
#include <linux/aer.h>
#include <linux/bitfield.h>
#include "pci.h"

DEFINE_MUTEX(pci_slot_mutex);

const char *pci_power_names[] = {
        "error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown",
};
EXPORT_SYMBOL_GPL(pci_power_names);

#ifdef CONFIG_X86_32
int isa_dma_bridge_buggy;
EXPORT_SYMBOL(isa_dma_bridge_buggy);
#endif

int pci_pci_problems;
EXPORT_SYMBOL(pci_pci_problems);

unsigned int pci_pm_d3hot_delay;

static void pci_pme_list_scan(struct work_struct *work);

static LIST_HEAD(pci_pme_list);
static DEFINE_MUTEX(pci_pme_list_mutex);
static DECLARE_DELAYED_WORK(pci_pme_work, pci_pme_list_scan);

struct pci_pme_device {
        struct list_head list;
        struct pci_dev *dev;
};

#define PME_TIMEOUT 1000 /* How long between PME checks */

/*
 * Following exit from Conventional Reset, devices must be ready within 1 sec
 * (PCIe r6.0 sec 6.6.1). A D3cold to D0 transition implies a Conventional
 * Reset (PCIe r6.0 sec 5.8).
 */
#define PCI_RESET_WAIT 1000 /* msec */

/*
 * Devices may extend the 1 sec period through Request Retry Status
 * completions (PCIe r6.0 sec 2.3.1). The spec does not provide an upper
 * limit, but 60 sec ought to be enough for any device to become
 * responsive.
 */
#define PCIE_RESET_READY_POLL_MS 60000 /* msec */

static void pci_dev_d3_sleep(struct pci_dev *dev)
{
        unsigned int delay_ms = max(dev->d3hot_delay, pci_pm_d3hot_delay);
        unsigned int upper;

        if (delay_ms) {
                /* Use a 20% upper bound, 1ms minimum */
                upper = max(DIV_ROUND_CLOSEST(delay_ms, 5), 1U);
                usleep_range(delay_ms * USEC_PER_MSEC,
                             (delay_ms + upper) * USEC_PER_MSEC);
        }
}
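
/*
 * Worked example of the range above (illustrative only): with
 * delay_ms == 10, upper == max(DIV_ROUND_CLOSEST(10, 5), 1U) == 2,
 * so the call becomes usleep_range(10000, 12000), i.e. a 10ms sleep
 * with a 20% upper bound.
 */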

bool pci_reset_supported(struct pci_dev *dev)
{
        return dev->reset_methods[0] != 0;
}

#ifdef CONFIG_PCI_DOMAINS
int pci_domains_supported = 1;
#endif

#define DEFAULT_CARDBUS_IO_SIZE         (256)
#define DEFAULT_CARDBUS_MEM_SIZE        (64*1024*1024)
/* pci=cbmemsize=nnM,cbiosize=nn can override this */
unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE;
unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;

#define DEFAULT_HOTPLUG_IO_SIZE         (256)
#define DEFAULT_HOTPLUG_MMIO_SIZE       (2*1024*1024)
#define DEFAULT_HOTPLUG_MMIO_PREF_SIZE  (2*1024*1024)
/* hpiosize=nn can override this */
unsigned long pci_hotplug_io_size = DEFAULT_HOTPLUG_IO_SIZE;
/*
 * pci=hpmmiosize=nnM overrides non-prefetchable MMIO size,
 * pci=hpmmioprefsize=nnM overrides prefetchable MMIO size;
 * pci=hpmemsize=nnM overrides both
 */
unsigned long pci_hotplug_mmio_size = DEFAULT_HOTPLUG_MMIO_SIZE;
unsigned long pci_hotplug_mmio_pref_size = DEFAULT_HOTPLUG_MMIO_PREF_SIZE;

#define DEFAULT_HOTPLUG_BUS_SIZE        1
unsigned long pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE;


/* PCIe MPS/MRRS strategy; can be overridden by kernel command-line param */
#ifdef CONFIG_PCIE_BUS_TUNE_OFF
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_TUNE_OFF;
#elif defined CONFIG_PCIE_BUS_SAFE
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_SAFE;
#elif defined CONFIG_PCIE_BUS_PERFORMANCE
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_PERFORMANCE;
#elif defined CONFIG_PCIE_BUS_PEER2PEER
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_PEER2PEER;
#else
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_DEFAULT;
#endif

/*
 * The default CLS is used if the arch didn't set CLS explicitly and not
 * all PCI devices agree on the same value. The arch can override either
 * the default or the actual value as it sees fit. Don't forget this is
 * measured in 32-bit words, not bytes.
 */
u8 pci_dfl_cache_line_size __ro_after_init = L1_CACHE_BYTES >> 2;
u8 pci_cache_line_size __ro_after_init;

/*
 * If we set up a device for bus mastering, we need to check the latency
 * timer as certain BIOSes forget to set it properly.
 */
unsigned int pcibios_max_latency = 255;

/* If set, the PCIe ARI capability will not be used. */
static bool pcie_ari_disabled;

/* If set, the PCIe ATS capability will not be used. */
static bool pcie_ats_disabled;

/* If set, the PCI config space of each device is printed during boot. */
bool pci_early_dump;

bool pci_ats_disabled(void)
{
        return pcie_ats_disabled;
}
EXPORT_SYMBOL_GPL(pci_ats_disabled);

/* Disable bridge_d3 for all PCIe ports */
static bool pci_bridge_d3_disable;
/* Force bridge_d3 for all PCIe ports */
static bool pci_bridge_d3_force;

static int __init pcie_port_pm_setup(char *str)
{
        if (!strcmp(str, "off"))
                pci_bridge_d3_disable = true;
        else if (!strcmp(str, "force"))
                pci_bridge_d3_force = true;
        return 1;
}
__setup("pcie_port_pm=", pcie_port_pm_setup);

/**
 * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
 * @bus: pointer to PCI bus structure to search
 *
 * Given a PCI bus, returns the highest PCI bus number present in the set
 * including the given PCI bus and its list of child PCI buses.
 */
unsigned char pci_bus_max_busnr(struct pci_bus *bus)
{
        struct pci_bus *tmp;
        unsigned char max, n;

        max = bus->busn_res.end;
        list_for_each_entry(tmp, &bus->children, node) {
                n = pci_bus_max_busnr(tmp);
                if (n > max)
                        max = n;
        }
        return max;
}
EXPORT_SYMBOL_GPL(pci_bus_max_busnr);

/**
 * pci_status_get_and_clear_errors - return and clear error bits in PCI_STATUS
 * @pdev: the PCI device
 *
 * Returns error bits set in PCI_STATUS and clears them.
 */
int pci_status_get_and_clear_errors(struct pci_dev *pdev)
{
        u16 status;
        int ret;

        ret = pci_read_config_word(pdev, PCI_STATUS, &status);
        if (ret != PCIBIOS_SUCCESSFUL)
                return -EIO;

        status &= PCI_STATUS_ERROR_BITS;
        if (status)
                pci_write_config_word(pdev, PCI_STATUS, status);

        return status;
}
EXPORT_SYMBOL_GPL(pci_status_get_and_clear_errors);

#ifdef CONFIG_HAS_IOMEM
static void __iomem *__pci_ioremap_resource(struct pci_dev *pdev, int bar,
                                            bool write_combine)
{
        struct resource *res = &pdev->resource[bar];
        resource_size_t start = res->start;
        resource_size_t size = resource_size(res);

        /*
         * Make sure the BAR is actually a memory resource, not an IO resource
         */
        if (res->flags & IORESOURCE_UNSET || !(res->flags & IORESOURCE_MEM)) {
                pci_err(pdev, "can't ioremap BAR %d: %pR\n", bar, res);
                return NULL;
        }

        if (write_combine)
                return ioremap_wc(start, size);

        return ioremap(start, size);
}

void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
{
        return __pci_ioremap_resource(pdev, bar, false);
}
EXPORT_SYMBOL_GPL(pci_ioremap_bar);

void __iomem *pci_ioremap_wc_bar(struct pci_dev *pdev, int bar)
{
        return __pci_ioremap_resource(pdev, bar, true);
}
EXPORT_SYMBOL_GPL(pci_ioremap_wc_bar);
#endif
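
/*
 * A minimal sketch of a typical pci_ioremap_bar() caller in a driver
 * probe path (MYDEV_CTRL_REG and the choice of BAR 0 are hypothetical):
 *
 *      void __iomem *regs = pci_ioremap_bar(pdev, 0);
 *
 *      if (!regs)
 *              return -ENOMEM;
 *      writel(0x1, regs + MYDEV_CTRL_REG);
 */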

/**
 * pci_dev_str_match_path - test if a path string matches a device
 * @dev: the PCI device to test
 * @path: string to match the device against
 * @endptr: pointer to the string after the match
 *
 * Test if a string (typically from a kernel parameter) formatted as a
 * path of device/function addresses matches a PCI device. The string must
 * be of the form:
 *
 *   [<domain>:]<bus>:<device>.<func>[/<device>.<func>]*
 *
 * A path for a device can be obtained using 'lspci -t'. Using a path
 * is more robust against bus renumbering than using only a single bus,
 * device and function address.
 *
 * Returns 1 if the string matches the device, 0 if it does not and
 * a negative error code if it fails to parse the string.
 */
static int pci_dev_str_match_path(struct pci_dev *dev, const char *path,
                                  const char **endptr)
{
        int ret;
        unsigned int seg, bus, slot, func;
        char *wpath, *p;
        char end;

        *endptr = strchrnul(path, ';');

        wpath = kmemdup_nul(path, *endptr - path, GFP_ATOMIC);
        if (!wpath)
                return -ENOMEM;

        while (1) {
                p = strrchr(wpath, '/');
                if (!p)
                        break;
                ret = sscanf(p, "/%x.%x%c", &slot, &func, &end);
                if (ret != 2) {
                        ret = -EINVAL;
                        goto free_and_exit;
                }

                if (dev->devfn != PCI_DEVFN(slot, func)) {
                        ret = 0;
                        goto free_and_exit;
                }

                /*
                 * Note: we don't need to get a reference to the upstream
                 * bridge because we hold a reference to the top level
                 * device which should hold a reference to the bridge,
                 * and so on.
                 */
                dev = pci_upstream_bridge(dev);
                if (!dev) {
                        ret = 0;
                        goto free_and_exit;
                }

                *p = 0;
        }

        ret = sscanf(wpath, "%x:%x:%x.%x%c", &seg, &bus, &slot,
                     &func, &end);
        if (ret != 4) {
                seg = 0;
                ret = sscanf(wpath, "%x:%x.%x%c", &bus, &slot, &func, &end);
                if (ret != 3) {
                        ret = -EINVAL;
                        goto free_and_exit;
                }
        }

        ret = (seg == pci_domain_nr(dev->bus) &&
               bus == dev->bus->number &&
               dev->devfn == PCI_DEVFN(slot, func));

free_and_exit:
        kfree(wpath);
        return ret;
}

/**
 * pci_dev_str_match - test if a string matches a device
 * @dev: the PCI device to test
 * @p: string to match the device against
 * @endptr: pointer to the string after the match
 *
 * Test if a string (typically from a kernel parameter) matches a specified
 * PCI device. The string may be of one of the following formats:
 *
 *   [<domain>:]<bus>:<device>.<func>[/<device>.<func>]*
 *   pci:<vendor>:<device>[:<subvendor>:<subdevice>]
 *
 * The first format specifies a PCI bus/device/function address which
 * may change if new hardware is inserted, if motherboard firmware changes,
 * or if kernel parameters change. If the domain is left unspecified, it
 * is taken to be 0. In order to be robust against bus renumbering issues,
 * a path of PCI device/function numbers may be used to address the
 * specific device. The path for a device can be determined through the
 * use of 'lspci -t'.
 *
 * The second format matches devices using IDs in the configuration
 * space which may match multiple devices in the system. A value of 0
 * for any field will match all devices. (Note: this differs from
 * in-kernel code that uses PCI_ANY_ID which is ~0; this is for
 * legacy reasons and convenience so users don't have to specify
 * FFFFFFFFs on the command line.)
 *
 * Returns 1 if the string matches the device, 0 if it does not and
 * a negative error code if the string cannot be parsed.
 */
static int pci_dev_str_match(struct pci_dev *dev, const char *p,
                             const char **endptr)
{
        int ret;
        int count;
        unsigned short vendor, device, subsystem_vendor, subsystem_device;

        if (strncmp(p, "pci:", 4) == 0) {
                /* PCI vendor/device (subvendor/subdevice) IDs are specified */
                p += 4;
                ret = sscanf(p, "%hx:%hx:%hx:%hx%n", &vendor, &device,
                             &subsystem_vendor, &subsystem_device, &count);
                if (ret != 4) {
                        ret = sscanf(p, "%hx:%hx%n", &vendor, &device, &count);
                        if (ret != 2)
                                return -EINVAL;

                        subsystem_vendor = 0;
                        subsystem_device = 0;
                }

                p += count;

                if ((!vendor || vendor == dev->vendor) &&
                    (!device || device == dev->device) &&
                    (!subsystem_vendor ||
                     subsystem_vendor == dev->subsystem_vendor) &&
                    (!subsystem_device ||
                     subsystem_device == dev->subsystem_device))
                        goto found;
        } else {
                /*
                 * PCI Bus, Device, Function IDs are specified
                 * (optionally, may include a path of devfns following it)
                 */
                ret = pci_dev_str_match_path(dev, p, &p);
                if (ret < 0)
                        return ret;
                else if (ret)
                        goto found;
        }

        *endptr = p;
        return 0;

found:
        *endptr = p;
        return 1;
}
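
/*
 * Illustrative strings accepted by pci_dev_str_match() (the addresses
 * and IDs below are made up, shown only to demonstrate the formats):
 *
 *      "0000:01:00.0"          domain 0, bus 01, device 00, function 0
 *      "01:00.0"               the same, with the domain defaulting to 0
 *      "0000:00:1c.0/00.0"     path form: function 00.0 below port 00:1c.0
 *      "pci:8086:9d18"         any device with vendor 8086, device 9d18
 *      "pci:8086:0:0:0"        vendor 8086, any device/subvendor/subdevice
 */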

static u8 __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
                                  u8 pos, int cap, int *ttl)
{
        u8 id;
        u16 ent;

        pci_bus_read_config_byte(bus, devfn, pos, &pos);

        while ((*ttl)--) {
                if (pos < 0x40)
                        break;
                pos &= ~3;
                pci_bus_read_config_word(bus, devfn, pos, &ent);

                id = ent & 0xff;
                if (id == 0xff)
                        break;
                if (id == cap)
                        return pos;
                pos = (ent >> 8);
        }
        return 0;
}

static u8 __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn,
                              u8 pos, int cap)
{
        int ttl = PCI_FIND_CAP_TTL;

        return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl);
}

u8 pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
{
        return __pci_find_next_cap(dev->bus, dev->devfn,
                                   pos + PCI_CAP_LIST_NEXT, cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_capability);

static u8 __pci_bus_find_cap_start(struct pci_bus *bus,
                                   unsigned int devfn, u8 hdr_type)
{
        u16 status;

        pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
        if (!(status & PCI_STATUS_CAP_LIST))
                return 0;

        switch (hdr_type) {
        case PCI_HEADER_TYPE_NORMAL:
        case PCI_HEADER_TYPE_BRIDGE:
                return PCI_CAPABILITY_LIST;
        case PCI_HEADER_TYPE_CARDBUS:
                return PCI_CB_CAPABILITY_LIST;
        }

        return 0;
}

/**
 * pci_find_capability - query for devices' capabilities
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Tell if a device supports a given PCI capability.
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it. Possible values for @cap include:
 *
 *  %PCI_CAP_ID_PM      Power Management
 *  %PCI_CAP_ID_AGP     Accelerated Graphics Port
 *  %PCI_CAP_ID_VPD     Vital Product Data
 *  %PCI_CAP_ID_SLOTID  Slot Identification
 *  %PCI_CAP_ID_MSI     Message Signalled Interrupts
 *  %PCI_CAP_ID_CHSWP   CompactPCI HotSwap
 *  %PCI_CAP_ID_PCIX    PCI-X
 *  %PCI_CAP_ID_EXP     PCI Express
 */
u8 pci_find_capability(struct pci_dev *dev, int cap)
{
        u8 pos;

        pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
        if (pos)
                pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap);

        return pos;
}
EXPORT_SYMBOL(pci_find_capability);
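
/*
 * A minimal usage sketch: locate the Power Management capability and
 * read PMCSR relative to it ("pdev" is a bound struct pci_dev):
 *
 *      u8 pm = pci_find_capability(pdev, PCI_CAP_ID_PM);
 *      u16 pmcsr;
 *
 *      if (pm)
 *              pci_read_config_word(pdev, pm + PCI_PM_CTRL, &pmcsr);
 */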

/**
 * pci_bus_find_capability - query for devices' capabilities
 * @bus: the PCI bus to query
 * @devfn: PCI device to query
 * @cap: capability code
 *
 * Like pci_find_capability() but works for PCI devices that do not have a
 * pci_dev structure set up yet.
 *
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.
 */
u8 pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
{
        u8 hdr_type, pos;

        pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);

        pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & PCI_HEADER_TYPE_MASK);
        if (pos)
                pos = __pci_find_next_cap(bus, devfn, pos, cap);

        return pos;
}
EXPORT_SYMBOL(pci_bus_find_capability);

/**
 * pci_find_next_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @start: address at which to start looking (0 to start at beginning of list)
 * @cap: capability code
 *
 * Returns the address of the next matching extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it. Some capabilities can occur several times, e.g., the
 * vendor-specific capability, and this provides a way to find them all.
 */
u16 pci_find_next_ext_capability(struct pci_dev *dev, u16 start, int cap)
{
        u32 header;
        int ttl;
        u16 pos = PCI_CFG_SPACE_SIZE;

        /* minimum 8 bytes per capability */
        ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;

        if (dev->cfg_size <= PCI_CFG_SPACE_SIZE)
                return 0;

        if (start)
                pos = start;

        if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
                return 0;

        /*
         * If we have no capabilities, this is indicated by cap ID,
         * cap version and next pointer all being 0.
         */
        if (header == 0)
                return 0;

        while (ttl-- > 0) {
                if (PCI_EXT_CAP_ID(header) == cap && pos != start)
                        return pos;

                pos = PCI_EXT_CAP_NEXT(header);
                if (pos < PCI_CFG_SPACE_SIZE)
                        break;

                if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
                        break;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(pci_find_next_ext_capability);

/**
 * pci_find_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Returns the address of the requested extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it. Possible values for @cap include:
 *
 *  %PCI_EXT_CAP_ID_ERR  Advanced Error Reporting
 *  %PCI_EXT_CAP_ID_VC   Virtual Channel
 *  %PCI_EXT_CAP_ID_DSN  Device Serial Number
 *  %PCI_EXT_CAP_ID_PWR  Power Budgeting
 */
u16 pci_find_ext_capability(struct pci_dev *dev, int cap)
{
        return pci_find_next_ext_capability(dev, 0, cap);
}
EXPORT_SYMBOL_GPL(pci_find_ext_capability);
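
/*
 * A minimal usage sketch: find the AER extended capability and read
 * its Uncorrectable Error Status register:
 *
 *      u16 aer = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
 *      u32 status;
 *
 *      if (aer)
 *              pci_read_config_dword(pdev, aer + PCI_ERR_UNCOR_STATUS,
 *                                    &status);
 */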

/**
 * pci_get_dsn - Read and return the 8-byte Device Serial Number
 * @dev: PCI device to query
 *
 * Looks up the PCI_EXT_CAP_ID_DSN and reads the 8 bytes of the Device Serial
 * Number.
 *
 * Returns the DSN, or zero if the capability does not exist.
 */
u64 pci_get_dsn(struct pci_dev *dev)
{
        u32 dword;
        u64 dsn;
        int pos;

        pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DSN);
        if (!pos)
                return 0;

        /*
         * The Device Serial Number is two dwords offset 4 bytes from the
         * capability position. The specification says that the first dword is
         * the lower half, and the second dword is the upper half.
         */
        pos += 4;
        pci_read_config_dword(dev, pos, &dword);
        dsn = (u64)dword;
        pci_read_config_dword(dev, pos + 4, &dword);
        dsn |= ((u64)dword) << 32;

        return dsn;
}
EXPORT_SYMBOL_GPL(pci_get_dsn);

static u8 __pci_find_next_ht_cap(struct pci_dev *dev, u8 pos, int ht_cap)
{
        int rc, ttl = PCI_FIND_CAP_TTL;
        u8 cap, mask;

        if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST)
                mask = HT_3BIT_CAP_MASK;
        else
                mask = HT_5BIT_CAP_MASK;

        pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos,
                                      PCI_CAP_ID_HT, &ttl);
        while (pos) {
                rc = pci_read_config_byte(dev, pos + 3, &cap);
                if (rc != PCIBIOS_SUCCESSFUL)
                        return 0;

                if ((cap & mask) == ht_cap)
                        return pos;

                pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn,
                                              pos + PCI_CAP_LIST_NEXT,
                                              PCI_CAP_ID_HT, &ttl);
        }

        return 0;
}

/**
 * pci_find_next_ht_capability - query a device's HyperTransport capabilities
 * @dev: PCI device to query
 * @pos: Position from which to continue searching
 * @ht_cap: HyperTransport capability code
 *
 * To be used in conjunction with pci_find_ht_capability() to search for
 * all capabilities matching @ht_cap. @pos should always be a value returned
 * from pci_find_ht_capability().
 *
 * NB. To be 100% safe against broken PCI devices, the caller should take
 * steps to avoid an infinite loop.
 */
u8 pci_find_next_ht_capability(struct pci_dev *dev, u8 pos, int ht_cap)
{
        return __pci_find_next_ht_cap(dev, pos + PCI_CAP_LIST_NEXT, ht_cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_ht_capability);

/**
 * pci_find_ht_capability - query a device's HyperTransport capabilities
 * @dev: PCI device to query
 * @ht_cap: HyperTransport capability code
 *
 * Tell if a device supports a given HyperTransport capability.
 * Returns an address within the device's PCI configuration space
 * or 0 in case the device does not support the requested capability.
 * The address points to the PCI capability, of type PCI_CAP_ID_HT,
 * which has a HyperTransport capability matching @ht_cap.
 */
u8 pci_find_ht_capability(struct pci_dev *dev, int ht_cap)
{
        u8 pos;

        pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
        if (pos)
                pos = __pci_find_next_ht_cap(dev, pos, ht_cap);

        return pos;
}
EXPORT_SYMBOL_GPL(pci_find_ht_capability);

/**
 * pci_find_vsec_capability - Find a vendor-specific extended capability
 * @dev: PCI device to query
 * @vendor: Vendor ID for which capability is defined
 * @cap: Vendor-specific capability ID
 *
 * If @dev has Vendor ID @vendor, search for a VSEC capability with
 * VSEC ID @cap. If found, return the capability offset in
 * config space; otherwise return 0.
 */
u16 pci_find_vsec_capability(struct pci_dev *dev, u16 vendor, int cap)
{
        u16 vsec = 0;
        u32 header;
        int ret;

        if (vendor != dev->vendor)
                return 0;

        while ((vsec = pci_find_next_ext_capability(dev, vsec,
                                                    PCI_EXT_CAP_ID_VNDR))) {
                ret = pci_read_config_dword(dev, vsec + PCI_VNDR_HEADER, &header);
                if (ret != PCIBIOS_SUCCESSFUL)
                        continue;

                if (PCI_VNDR_HEADER_ID(header) == cap)
                        return vsec;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(pci_find_vsec_capability);

/**
 * pci_find_dvsec_capability - Find DVSEC for vendor
 * @dev: PCI device to query
 * @vendor: Vendor ID to match for the DVSEC
 * @dvsec: Designated Vendor-specific capability ID
 *
 * If DVSEC has Vendor ID @vendor and DVSEC ID @dvsec return the capability
 * offset in config space; otherwise return 0.
 */
u16 pci_find_dvsec_capability(struct pci_dev *dev, u16 vendor, u16 dvsec)
{
        int pos;

        pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DVSEC);
        if (!pos)
                return 0;

        while (pos) {
                u16 v, id;

                pci_read_config_word(dev, pos + PCI_DVSEC_HEADER1, &v);
                pci_read_config_word(dev, pos + PCI_DVSEC_HEADER2, &id);
                if (vendor == v && dvsec == id)
                        return pos;

                pos = pci_find_next_ext_capability(dev, pos, PCI_EXT_CAP_ID_DVSEC);
        }

        return 0;
}
EXPORT_SYMBOL_GPL(pci_find_dvsec_capability);
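
/*
 * A minimal usage sketch (the vendor and DVSEC IDs here are
 * placeholders, not taken from any real device):
 *
 *      u32 hdr;
 *      u16 pos = pci_find_dvsec_capability(pdev, 0x1234, 0x0001);
 *
 *      if (pos)
 *              pci_read_config_dword(pdev, pos + PCI_DVSEC_HEADER1, &hdr);
 */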

/**
 * pci_find_parent_resource - return resource region of parent bus of given
 *                            region
 * @dev: PCI device structure contains resources to be searched
 * @res: child resource record for which parent is sought
 *
 * For given resource region of given device, return the resource region of
 * parent bus the given region is contained in.
 */
struct resource *pci_find_parent_resource(const struct pci_dev *dev,
                                          struct resource *res)
{
        const struct pci_bus *bus = dev->bus;
        struct resource *r;

        pci_bus_for_each_resource(bus, r) {
                if (!r)
                        continue;
                if (resource_contains(r, res)) {

                        /*
                         * If the window is prefetchable but the BAR is
                         * not, the allocator made a mistake.
                         */
                        if (r->flags & IORESOURCE_PREFETCH &&
                            !(res->flags & IORESOURCE_PREFETCH))
                                return NULL;

                        /*
                         * If we're below a transparent bridge, there may
                         * be both a positively-decoded aperture and a
                         * subtractively-decoded region that contain the BAR.
                         * We want the positively-decoded one, so this depends
                         * on pci_bus_for_each_resource() giving us those
                         * first.
                         */
                        return r;
                }
        }
        return NULL;
}
EXPORT_SYMBOL(pci_find_parent_resource);

/**
 * pci_find_resource - Return matching PCI device resource
 * @dev: PCI device to query
 * @res: Resource to look for
 *
 * Goes over standard PCI resources (BARs) and checks if the given resource
 * is partially or fully contained in any of them. In that case the
 * matching resource is returned, %NULL otherwise.
 */
struct resource *pci_find_resource(struct pci_dev *dev, struct resource *res)
{
        int i;

        for (i = 0; i < PCI_STD_NUM_BARS; i++) {
                struct resource *r = &dev->resource[i];

                if (r->start && resource_contains(r, res))
                        return r;
        }

        return NULL;
}
EXPORT_SYMBOL(pci_find_resource);

/**
 * pci_resource_name - Return the name of the PCI resource
 * @dev: PCI device to query
 * @i: index of the resource
 *
 * Return the standard PCI resource (BAR) name according to its index.
 */
const char *pci_resource_name(struct pci_dev *dev, unsigned int i)
{
        static const char * const bar_name[] = {
                "BAR 0",
                "BAR 1",
                "BAR 2",
                "BAR 3",
                "BAR 4",
                "BAR 5",
                "ROM",
#ifdef CONFIG_PCI_IOV
                "VF BAR 0",
                "VF BAR 1",
                "VF BAR 2",
                "VF BAR 3",
                "VF BAR 4",
                "VF BAR 5",
#endif
                "bridge window",        /* "io" included in %pR */
                "bridge window",        /* "mem" included in %pR */
                "bridge window",        /* "mem pref" included in %pR */
        };
        static const char * const cardbus_name[] = {
                "BAR 1",
                "unknown",
                "unknown",
                "unknown",
                "unknown",
                "unknown",
#ifdef CONFIG_PCI_IOV
                "unknown",
                "unknown",
                "unknown",
                "unknown",
                "unknown",
                "unknown",
#endif
                "CardBus bridge window 0",      /* I/O */
                "CardBus bridge window 1",      /* I/O */
                "CardBus bridge window 0",      /* mem */
                "CardBus bridge window 1",      /* mem */
        };

        if (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS &&
            i < ARRAY_SIZE(cardbus_name))
                return cardbus_name[i];

        if (i < ARRAY_SIZE(bar_name))
                return bar_name[i];

        return "unknown";
}

/**
 * pci_wait_for_pending - wait for @mask bit(s) to clear in status word @pos
 * @dev: the PCI device to operate on
 * @pos: config space offset of status word
 * @mask: mask of bit(s) to care about in status word
 *
 * Return 1 when mask bit(s) in status word clear, 0 otherwise.
 */
int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask)
{
        int i;

        /* Wait for Transaction Pending bit to clear */
        for (i = 0; i < 4; i++) {
                u16 status;
                if (i)
                        msleep((1 << (i - 1)) * 100);

                pci_read_config_word(dev, pos, &status);
                if (!(status & mask))
                        return 1;
        }

        return 0;
}
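
/*
 * With the backoff above the total wait is bounded: the loop sleeps
 * 100ms, 200ms and 400ms between the four config reads, i.e. at most
 * ~700ms before giving up.
 */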

static int pci_acs_enable;

/**
 * pci_request_acs - ask for ACS to be enabled if supported
 */
void pci_request_acs(void)
{
        pci_acs_enable = 1;
}

static const char *disable_acs_redir_param;
static const char *config_acs_param;

struct pci_acs {
        u16 cap;
        u16 ctrl;
        u16 fw_ctrl;
};

static void __pci_config_acs(struct pci_dev *dev, struct pci_acs *caps,
                             const char *p, const u16 acs_mask, const u16 acs_flags)
{
        u16 flags = acs_flags;
        u16 mask = acs_mask;
        char *delimit;
        int ret = 0;

        if (!p)
                return;

        while (*p) {
                if (!acs_mask) {
                        /* Check for ACS flags */
                        delimit = strstr(p, "@");
                        if (delimit) {
                                int end;
                                u32 shift = 0;

                                end = delimit - p - 1;
                                mask = 0;
                                flags = 0;

                                while (end > -1) {
                                        if (*(p + end) == '0') {
                                                mask |= 1 << shift;
                                                shift++;
                                                end--;
                                        } else if (*(p + end) == '1') {
                                                mask |= 1 << shift;
                                                flags |= 1 << shift;
                                                shift++;
                                                end--;
                                        } else if ((*(p + end) == 'x') || (*(p + end) == 'X')) {
                                                shift++;
                                                end--;
                                        } else {
                                                pci_err(dev, "Invalid ACS flags... Ignoring\n");
                                                return;
                                        }
                                }
                                p = delimit + 1;
                        } else {
                                pci_err(dev, "ACS Flags missing\n");
                                return;
                        }
                }

                if (mask & ~(PCI_ACS_SV | PCI_ACS_TB | PCI_ACS_RR | PCI_ACS_CR |
                             PCI_ACS_UF | PCI_ACS_EC | PCI_ACS_DT)) {
                        pci_err(dev, "Invalid ACS flags specified\n");
                        return;
                }

                ret = pci_dev_str_match(dev, p, &p);
                if (ret < 0) {
                        pr_info_once("PCI: Can't parse ACS command line parameter\n");
                        break;
                } else if (ret == 1) {
                        /* Found a match */
                        break;
                }

                if (*p != ';' && *p != ',') {
                        /* End of param or invalid format */
                        break;
                }
                p++;
        }

        if (ret != 1)
                return;

        if (!pci_dev_specific_disable_acs_redir(dev))
                return;

        pci_dbg(dev, "ACS mask = %#06x\n", mask);
        pci_dbg(dev, "ACS flags = %#06x\n", flags);
        pci_dbg(dev, "ACS control = %#06x\n", caps->ctrl);
        pci_dbg(dev, "ACS fw_ctrl = %#06x\n", caps->fw_ctrl);

        /*
         * For mask bits that are 0, copy them from the firmware setting
         * and apply flags for all the mask bits that are 1.
         */
        caps->ctrl = (caps->fw_ctrl & ~mask) | (flags & mask);

        pci_info(dev, "Configured ACS to %#06x\n", caps->ctrl);
}
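
/*
 * Example of the flags syntax parsed above (the device address is
 * hypothetical): a parameter such as
 *
 *      pci=config_acs=10@0000:00:1c.0
 *
 * is consumed from right to left, one ACS control bit per character:
 * the rightmost '0' clears bit 0 (Source Validation), the '1' sets
 * bit 1 (Translation Blocking), and all other bits keep the value
 * that firmware left in place.
 */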

/**
 * pci_std_enable_acs - enable ACS on devices using standard ACS capabilities
 * @dev: the PCI device
 * @caps: default ACS controls
 */
static void pci_std_enable_acs(struct pci_dev *dev, struct pci_acs *caps)
{
        /* Source Validation */
        caps->ctrl |= (caps->cap & PCI_ACS_SV);

        /* P2P Request Redirect */
        caps->ctrl |= (caps->cap & PCI_ACS_RR);

        /* P2P Completion Redirect */
        caps->ctrl |= (caps->cap & PCI_ACS_CR);

        /* Upstream Forwarding */
        caps->ctrl |= (caps->cap & PCI_ACS_UF);

        /* Enable Translation Blocking for external devices and noats */
        if (pci_ats_disabled() || dev->external_facing || dev->untrusted)
                caps->ctrl |= (caps->cap & PCI_ACS_TB);
}

/**
 * pci_enable_acs - enable ACS if hardware supports it
 * @dev: the PCI device
 */
static void pci_enable_acs(struct pci_dev *dev)
{
        struct pci_acs caps;
        bool enable_acs = false;
        int pos;

        /* If an iommu is present we start with kernel default caps */
        if (pci_acs_enable) {
                if (pci_dev_specific_enable_acs(dev))
                        enable_acs = true;
        }

        pos = dev->acs_cap;
        if (!pos)
                return;

        pci_read_config_word(dev, pos + PCI_ACS_CAP, &caps.cap);
        pci_read_config_word(dev, pos + PCI_ACS_CTRL, &caps.ctrl);
        caps.fw_ctrl = caps.ctrl;

        if (enable_acs)
                pci_std_enable_acs(dev, &caps);

        /*
         * Always apply caps from the command line, even if there is no iommu.
         * Trust that the admin has a reason to change the ACS settings.
         */
        __pci_config_acs(dev, &caps, disable_acs_redir_param,
                         PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC,
                         ~(PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC));
        __pci_config_acs(dev, &caps, config_acs_param, 0, 0);

        pci_write_config_word(dev, pos + PCI_ACS_CTRL, caps.ctrl);
}

/**
 * pci_restore_bars - restore a device's BAR values (e.g. after wake-up)
 * @dev: PCI device to have its BARs restored
 *
 * Restore the BAR values for a given device, so as to make it
 * accessible by its driver.
 */
static void pci_restore_bars(struct pci_dev *dev)
{
        int i;

        for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
                pci_update_resource(dev, i);
}

static inline bool platform_pci_power_manageable(struct pci_dev *dev)
{
        if (pci_use_mid_pm())
                return true;

        return acpi_pci_power_manageable(dev);
}

static inline int platform_pci_set_power_state(struct pci_dev *dev,
                                               pci_power_t t)
{
        if (pci_use_mid_pm())
                return mid_pci_set_power_state(dev, t);

        return acpi_pci_set_power_state(dev, t);
}

static inline pci_power_t platform_pci_get_power_state(struct pci_dev *dev)
{
        if (pci_use_mid_pm())
                return mid_pci_get_power_state(dev);

        return acpi_pci_get_power_state(dev);
}

static inline void platform_pci_refresh_power_state(struct pci_dev *dev)
{
        if (!pci_use_mid_pm())
                acpi_pci_refresh_power_state(dev);
}

static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev)
{
        if (pci_use_mid_pm())
                return PCI_POWER_ERROR;

        return acpi_pci_choose_state(dev);
}

static inline int platform_pci_set_wakeup(struct pci_dev *dev, bool enable)
{
        if (pci_use_mid_pm())
                return PCI_POWER_ERROR;

        return acpi_pci_wakeup(dev, enable);
}

static inline bool platform_pci_need_resume(struct pci_dev *dev)
{
        if (pci_use_mid_pm())
                return false;

        return acpi_pci_need_resume(dev);
}

static inline bool platform_pci_bridge_d3(struct pci_dev *dev)
{
        if (pci_use_mid_pm())
                return false;

        return acpi_pci_bridge_d3(dev);
}

/**
 * pci_update_current_state - Read power state of given device and cache it
 * @dev: PCI device to handle.
 * @state: State to cache in case the device doesn't have the PM capability
 *
 * The power state is read from the PMCSR register, which however is
 * inaccessible in D3cold. The platform firmware is therefore queried first
 * to detect accessibility of the register. In case the platform firmware
 * reports an incorrect state or the device isn't power manageable by the
 * platform at all, we try to detect D3cold by testing accessibility of the
 * vendor ID in config space.
 */
void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
{
        if (platform_pci_get_power_state(dev) == PCI_D3cold) {
                dev->current_state = PCI_D3cold;
        } else if (dev->pm_cap) {
                u16 pmcsr;

                pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
                if (PCI_POSSIBLE_ERROR(pmcsr)) {
                        dev->current_state = PCI_D3cold;
                        return;
                }
                dev->current_state = pmcsr & PCI_PM_CTRL_STATE_MASK;
        } else {
                dev->current_state = state;
        }
}

/**
 * pci_refresh_power_state - Refresh the given device's power state data
 * @dev: Target PCI device.
 *
 * Ask the platform to refresh the device's power state information and invoke
 * pci_update_current_state() to update its current PCI power state.
 */
void pci_refresh_power_state(struct pci_dev *dev)
{
        platform_pci_refresh_power_state(dev);
        pci_update_current_state(dev, dev->current_state);
}

/**
 * pci_platform_power_transition - Use platform to change device power state
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 */
int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
{
        int error;

        error = platform_pci_set_power_state(dev, state);
        if (!error)
                pci_update_current_state(dev, state);
        else if (!dev->pm_cap) /* Fall back to PCI_D0 */
                dev->current_state = PCI_D0;

        return error;
}
EXPORT_SYMBOL_GPL(pci_platform_power_transition);

static int pci_resume_one(struct pci_dev *pci_dev, void *ign)
{
        pm_request_resume(&pci_dev->dev);
        return 0;
}

/**
 * pci_resume_bus - Walk given bus and runtime resume devices on it
 * @bus: Top bus of the subtree to walk.
 */
void pci_resume_bus(struct pci_bus *bus)
{
        if (bus)
                pci_walk_bus(bus, pci_resume_one, NULL);
}

static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout)
{
        int delay = 1;
        bool retrain = false;
        struct pci_dev *root, *bridge;

        root = pcie_find_root_port(dev);

        if (pci_is_pcie(dev)) {
                bridge = pci_upstream_bridge(dev);
                if (bridge)
                        retrain = true;
        }

        /*
         * The caller has already waited long enough after a reset that the
         * device should respond to config requests, but it may respond
         * with Request Retry Status (RRS) if it needs more time to
         * initialize.
         *
         * If the device is below a Root Port with Configuration RRS
         * Software Visibility enabled, reading the Vendor ID returns a
         * special data value if the device responded with RRS. Read the
         * Vendor ID until we get non-RRS status.
         *
         * If there's no Root Port or Configuration RRS Software Visibility
         * is not enabled, the device may still respond with RRS, but
         * hardware may retry the config request. If no retries receive
         * Successful Completion, hardware generally synthesizes ~0
         * (PCI_ERROR_RESPONSE) data to complete the read. Reading Vendor
         * ID for VFs and non-existent devices also returns ~0, so read the
         * Command register until it returns something other than ~0.
         */
        for (;;) {
                u32 id;

                if (pci_dev_is_disconnected(dev)) {
                        pci_dbg(dev, "disconnected; not waiting\n");
                        return -ENOTTY;
                }

                if (root && root->config_rrs_sv) {
                        pci_read_config_dword(dev, PCI_VENDOR_ID, &id);
                        if (!pci_bus_rrs_vendor_id(id))
                                break;
                } else {
                        pci_read_config_dword(dev, PCI_COMMAND, &id);
                        if (!PCI_POSSIBLE_ERROR(id))
                                break;
                }

                if (delay > timeout) {
                        pci_warn(dev, "not ready %dms after %s; giving up\n",
                                 delay - 1, reset_type);
                        return -ENOTTY;
                }

                if (delay > PCI_RESET_WAIT) {
                        if (retrain) {
                                retrain = false;
                                if (pcie_failed_link_retrain(bridge) == 0) {
                                        delay = 1;
                                        continue;
                                }
                        }
                        pci_info(dev, "not ready %dms after %s; waiting\n",
                                 delay - 1, reset_type);
                }

                msleep(delay);
                delay *= 2;
        }

        if (delay > PCI_RESET_WAIT)
                pci_info(dev, "ready %dms after %s\n", delay - 1,
                         reset_type);
        else
                pci_dbg(dev, "ready %dms after %s\n", delay - 1,
                        reset_type);

        return 0;
}

/**
 * pci_power_up - Put the given device into D0
 * @dev: PCI device to power up
 *
 * On success, return 0 or 1, depending on whether or not it is necessary to
 * restore the device's BARs subsequently (1 is returned in that case).
 *
 * On failure, return a negative error code. Always return failure if @dev
 * lacks a Power Management Capability, even if the platform was able to
 * put the device in D0 via non-PCI means.
 */
int pci_power_up(struct pci_dev *dev)
{
        bool need_restore;
        pci_power_t state;
        u16 pmcsr;

        platform_pci_set_power_state(dev, PCI_D0);

        if (!dev->pm_cap) {
                state = platform_pci_get_power_state(dev);
                if (state == PCI_UNKNOWN)
                        dev->current_state = PCI_D0;
                else
                        dev->current_state = state;

                return -EIO;
        }

        pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
        if (PCI_POSSIBLE_ERROR(pmcsr)) {
                pci_err(dev, "Unable to change power state from %s to D0, device inaccessible\n",
                        pci_power_name(dev->current_state));
                dev->current_state = PCI_D3cold;
                return -EIO;
        }

        state = pmcsr & PCI_PM_CTRL_STATE_MASK;

        need_restore = (state == PCI_D3hot || dev->current_state >= PCI_D3hot) &&
                        !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET);

        if (state == PCI_D0)
                goto end;

        /*
         * Force the entire word to 0. This doesn't affect PME_Status, disables
         * PME_En, and sets PowerState to 0.
         */
        pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, 0);

        /* Mandatory transition delays; see PCI PM 1.2. */
        if (state == PCI_D3hot)
                pci_dev_d3_sleep(dev);
        else if (state == PCI_D2)
                udelay(PCI_PM_D2_DELAY);

end:
        dev->current_state = PCI_D0;
        if (need_restore)
                return 1;

        return 0;
}

/**
 * pci_set_full_power_state - Put a PCI device into D0 and update its state
 * @dev: PCI device to power up
 * @locked: whether pci_bus_sem is held
 *
 * Call pci_power_up() to put @dev into D0, read from its PCI_PM_CTRL register
 * to confirm the state change, restore its BARs if they might be lost and
 * reconfigure ASPM in accordance with the new power state.
 *
 * If pci_restore_state() is going to be called right after a power state change
 * to D0, it is more efficient to use pci_power_up() directly instead of this
 * function.
 */
static int pci_set_full_power_state(struct pci_dev *dev, bool locked)
{
        u16 pmcsr;
        int ret;

        ret = pci_power_up(dev);
        if (ret < 0) {
                if (dev->current_state == PCI_D0)
                        return 0;

                return ret;
        }

        pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
        dev->current_state = pmcsr & PCI_PM_CTRL_STATE_MASK;
        if (dev->current_state != PCI_D0) {
                pci_info_ratelimited(dev, "Refused to change power state from %s to D0\n",
                                     pci_power_name(dev->current_state));
        } else if (ret > 0) {
                /*
                 * According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
                 * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
                 * from D3hot to D0 _may_ perform an internal reset, thereby
                 * going to "D0 Uninitialized" rather than "D0 Initialized".
                 * For example, at least some versions of the 3c905B and the
                 * 3c556B exhibit this behaviour.
                 *
                 * At least some laptop BIOSen (e.g. the Thinkpad T21) leave
                 * devices in a D3hot state at boot. Consequently, we need to
                 * restore at least the BARs so that the device will be
                 * accessible to its driver.
                 */
                pci_restore_bars(dev);
        }

        if (dev->bus->self)
                pcie_aspm_pm_state_change(dev->bus->self, locked);

        return 0;
}

/**
 * __pci_dev_set_current_state - Set current state of a PCI device
 * @dev: Device to handle
 * @data: pointer to state to be set
 */
static int __pci_dev_set_current_state(struct pci_dev *dev, void *data)
{
        pci_power_t state = *(pci_power_t *)data;

        dev->current_state = state;
        return 0;
}

/**
 * pci_bus_set_current_state - Walk given bus and set current state of devices
 * @bus: Top bus of the subtree to walk.
 * @state: state to be set
 */
void pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state)
{
        if (bus)
                pci_walk_bus(bus, __pci_dev_set_current_state, &state);
}

static void __pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state, bool locked)
{
        if (!bus)
                return;

        if (locked)
                pci_walk_bus_locked(bus, __pci_dev_set_current_state, &state);
        else
                pci_walk_bus(bus, __pci_dev_set_current_state, &state);
}

/**
 * pci_set_low_power_state - Put a PCI device into a low-power state.
 * @dev: PCI device to handle.
 * @state: PCI power state (D1, D2, D3hot) to put the device into.
 * @locked: whether pci_bus_sem is held
 *
 * Use the device's PCI_PM_CTRL register to put it into a low-power state.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if device already is in the requested state.
 * 0 if device's power state has been successfully changed.
 */
static int pci_set_low_power_state(struct pci_dev *dev, pci_power_t state, bool locked)
{
        u16 pmcsr;

        if (!dev->pm_cap)
                return -EIO;

        /*
         * Validate transition: We can enter D0 from any state, but if
         * we're already in a low-power state, we can only go deeper. E.g.,
         * we can go from D1 to D3, but we can't go directly from D3 to D1;
         * we'd have to go from D3 to D0, then to D1.
         */
        if (dev->current_state <= PCI_D3cold && dev->current_state > state) {
                pci_dbg(dev, "Invalid power transition (from %s to %s)\n",
                        pci_power_name(dev->current_state),
                        pci_power_name(state));
                return -EINVAL;
        }

        /* Check if this device supports the desired state */
        if ((state == PCI_D1 && !dev->d1_support)
            || (state == PCI_D2 && !dev->d2_support))
                return -EIO;

        pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
        if (PCI_POSSIBLE_ERROR(pmcsr)) {
                pci_err(dev, "Unable to change power state from %s to %s, device inaccessible\n",
                        pci_power_name(dev->current_state),
                        pci_power_name(state));
                dev->current_state = PCI_D3cold;
                return -EIO;
        }

        pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
        pmcsr |= state;

        /* Enter specified state */
        pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);

        /* Mandatory power management transition delays; see PCI PM 1.2. */
        if (state == PCI_D3hot)
                pci_dev_d3_sleep(dev);
        else if (state == PCI_D2)
                udelay(PCI_PM_D2_DELAY);

        pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
        dev->current_state = pmcsr & PCI_PM_CTRL_STATE_MASK;
        if (dev->current_state != state)
                pci_info_ratelimited(dev, "Refused to change power state from %s to %s\n",
                                     pci_power_name(dev->current_state),
                                     pci_power_name(state));

        if (dev->bus->self)
                pcie_aspm_pm_state_change(dev->bus->self, locked);

        return 0;
}

static int __pci_set_power_state(struct pci_dev *dev, pci_power_t state, bool locked)
{
        int error;

        /* Bound the state we're entering */
        if (state > PCI_D3cold)
                state = PCI_D3cold;
        else if (state < PCI_D0)
                state = PCI_D0;
        else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))

                /*
                 * If the device or the parent bridge do not support PCI
                 * PM, ignore the request if we're doing anything other
                 * than putting it into D0 (which would only happen on
                 * boot).
                 */
                return 0;

        /* Check if we're already there */
        if (dev->current_state == state)
                return 0;

        if (state == PCI_D0)
                return pci_set_full_power_state(dev, locked);

        /*
         * This device is quirked not to be put into D3, so don't put it in
         * D3
         */
        if (state >= PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
                return 0;

        if (state == PCI_D3cold) {
                /*
                 * To put the device in D3cold, put it into D3hot in the native
                 * way, then put it into D3cold using platform ops.
                 */
                error = pci_set_low_power_state(dev, PCI_D3hot, locked);

                if (pci_platform_power_transition(dev, PCI_D3cold))
                        return error;

                /* Powering off a bridge may power off the whole hierarchy */
                if (dev->current_state == PCI_D3cold)
                        __pci_bus_set_current_state(dev->subordinate, PCI_D3cold, locked);
        } else {
                error = pci_set_low_power_state(dev, state, locked);

                if (pci_platform_power_transition(dev, state))
                        return error;
        }

        return 0;
}

/**
 * pci_set_power_state - Set the power state of a PCI device
 * @dev: PCI device to handle.
 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 *
 * Transition a device to a new power state, using the platform firmware and/or
 * the device's PCI PM registers.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if the transition is to D1 or D2 but D1 and D2 are not supported.
 * 0 if device already is in the requested state.
 * 0 if the transition is to D3 but D3 is not supported.
 * 0 if device's power state has been successfully changed.
 */
int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
        return __pci_set_power_state(dev, state, false);
}
EXPORT_SYMBOL(pci_set_power_state);
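
/*
 * A minimal sketch of a legacy PCI driver suspend hook built on this
 * API (mydrv_suspend() is hypothetical; pci_choose_state() maps the
 * system sleep state to a PCI power state):
 *
 *      static int mydrv_suspend(struct pci_dev *pdev, pm_message_t state)
 *      {
 *              pci_save_state(pdev);
 *              pci_set_power_state(pdev, pci_choose_state(pdev, state));
 *              return 0;
 *      }
 */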

int pci_set_power_state_locked(struct pci_dev *dev, pci_power_t state)
{
        lockdep_assert_held(&pci_bus_sem);

        return __pci_set_power_state(dev, state, true);
}
EXPORT_SYMBOL(pci_set_power_state_locked);

#define PCI_EXP_SAVE_REGS       7

static struct pci_cap_saved_state *_pci_find_saved_cap(struct pci_dev *pci_dev,
                                                       u16 cap, bool extended)
{
        struct pci_cap_saved_state *tmp;

        hlist_for_each_entry(tmp, &pci_dev->saved_cap_space, next) {
                if (tmp->cap.cap_extended == extended && tmp->cap.cap_nr == cap)
                        return tmp;
        }
        return NULL;
}

struct pci_cap_saved_state *pci_find_saved_cap(struct pci_dev *dev, char cap)
{
        return _pci_find_saved_cap(dev, cap, false);
}

struct pci_cap_saved_state *pci_find_saved_ext_cap(struct pci_dev *dev, u16 cap)
{
        return _pci_find_saved_cap(dev, cap, true);
}

static int pci_save_pcie_state(struct pci_dev *dev)
{
        int i = 0;
        struct pci_cap_saved_state *save_state;
        u16 *cap;

        if (!pci_is_pcie(dev))
                return 0;

        save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
        if (!save_state) {
                pci_err(dev, "buffer not found in %s\n", __func__);
                return -ENOMEM;
        }

        cap = (u16 *)&save_state->cap.data[0];
        pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &cap[i++]);
        pcie_capability_read_word(dev, PCI_EXP_LNKCTL, &cap[i++]);
        pcie_capability_read_word(dev, PCI_EXP_SLTCTL, &cap[i++]);
        pcie_capability_read_word(dev, PCI_EXP_RTCTL, &cap[i++]);
        pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &cap[i++]);
        pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &cap[i++]);
        pcie_capability_read_word(dev, PCI_EXP_SLTCTL2, &cap[i++]);

        pci_save_aspm_l1ss_state(dev);
        pci_save_ltr_state(dev);

        return 0;
}

static void pci_restore_pcie_state(struct pci_dev *dev)
{
        int i = 0;
        struct pci_cap_saved_state *save_state;
        u16 *cap;

        /*
         * Restore max latencies (in the LTR capability) before enabling
         * LTR itself in PCI_EXP_DEVCTL2.
         */
        pci_restore_ltr_state(dev);
        pci_restore_aspm_l1ss_state(dev);

        save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
        if (!save_state)
                return;

        /*
         * Downstream ports reset the LTR enable bit when link goes down.
         * Check and re-configure the bit here before restoring device.
         * PCIe r5.0, sec 7.5.3.16.
         */
        pci_bridge_reconfigure_ltr(dev);

        cap = (u16 *)&save_state->cap.data[0];
        pcie_capability_write_word(dev, PCI_EXP_DEVCTL, cap[i++]);
        pcie_capability_write_word(dev, PCI_EXP_LNKCTL, cap[i++]);
        pcie_capability_write_word(dev, PCI_EXP_SLTCTL, cap[i++]);
        pcie_capability_write_word(dev, PCI_EXP_RTCTL, cap[i++]);
        pcie_capability_write_word(dev, PCI_EXP_DEVCTL2, cap[i++]);
        pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, cap[i++]);
        pcie_capability_write_word(dev, PCI_EXP_SLTCTL2, cap[i++]);
}

static int pci_save_pcix_state(struct pci_dev *dev)
{
        int pos;
        struct pci_cap_saved_state *save_state;

        pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
        if (!pos)
                return 0;

        save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
        if (!save_state) {
                pci_err(dev, "buffer not found in %s\n", __func__);
                return -ENOMEM;
        }

        pci_read_config_word(dev, pos + PCI_X_CMD,
                             (u16 *)save_state->cap.data);

        return 0;
}

static void pci_restore_pcix_state(struct pci_dev *dev)
{
        int i = 0, pos;
        struct pci_cap_saved_state *save_state;
        u16 *cap;

        save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
        pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
        if (!save_state || !pos)
                return;
        cap = (u16 *)&save_state->cap.data[0];

        pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
}

/**
 * pci_save_state - save the PCI configuration space of a device before
 *                  suspending
 * @dev: PCI device that we're dealing with
 */
int pci_save_state(struct pci_dev *dev)
{
        int i;
        /* XXX: 100% dword access ok here? */
        for (i = 0; i < 16; i++) {
                pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
                pci_dbg(dev, "save config %#04x: %#010x\n",
                        i * 4, dev->saved_config_space[i]);
        }
        dev->state_saved = true;

        i = pci_save_pcie_state(dev);
        if (i != 0)
                return i;

        i = pci_save_pcix_state(dev);
        if (i != 0)
                return i;

        pci_save_dpc_state(dev);
        pci_save_aer_state(dev);
        pci_save_ptm_state(dev);
        pci_save_tph_state(dev);
        return pci_save_vc_state(dev);
}
EXPORT_SYMBOL(pci_save_state);
1818
pci_restore_config_dword(struct pci_dev * pdev,int offset,u32 saved_val,int retry,bool force)1819 static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
1820 u32 saved_val, int retry, bool force)
1821 {
1822 u32 val;
1823
1824 pci_read_config_dword(pdev, offset, &val);
1825 if (!force && val == saved_val)
1826 return;
1827
1828 for (;;) {
1829 pci_dbg(pdev, "restore config %#04x: %#010x -> %#010x\n",
1830 offset, val, saved_val);
1831 pci_write_config_dword(pdev, offset, saved_val);
1832 if (retry-- <= 0)
1833 return;
1834
1835 pci_read_config_dword(pdev, offset, &val);
1836 if (val == saved_val)
1837 return;
1838
1839 mdelay(1);
1840 }
1841 }
1842
pci_restore_config_space_range(struct pci_dev * pdev,int start,int end,int retry,bool force)1843 static void pci_restore_config_space_range(struct pci_dev *pdev,
1844 int start, int end, int retry,
1845 bool force)
1846 {
1847 int index;
1848
1849 for (index = end; index >= start; index--)
1850 pci_restore_config_dword(pdev, 4 * index,
1851 pdev->saved_config_space[index],
1852 retry, force);
1853 }
1854
pci_restore_config_space(struct pci_dev * pdev)1855 static void pci_restore_config_space(struct pci_dev *pdev)
1856 {
1857 if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
1858 pci_restore_config_space_range(pdev, 10, 15, 0, false);
1859 /* Restore BARs before the command register. */
1860 pci_restore_config_space_range(pdev, 4, 9, 10, false);
1861 pci_restore_config_space_range(pdev, 0, 3, 0, false);
1862 } else if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
1863 pci_restore_config_space_range(pdev, 12, 15, 0, false);
1864
1865 /*
1866 * Force rewriting of prefetch registers to avoid S3 resume
1867 * issues on Intel PCI bridges that occur when these
1868 * registers are not explicitly written.
1869 */
1870 pci_restore_config_space_range(pdev, 9, 11, 0, true);
1871 pci_restore_config_space_range(pdev, 0, 8, 0, false);
1872 } else {
1873 pci_restore_config_space_range(pdev, 0, 15, 0, false);
1874 }
1875 }
1876
pci_restore_rebar_state(struct pci_dev * pdev)1877 static void pci_restore_rebar_state(struct pci_dev *pdev)
1878 {
1879 unsigned int pos, nbars, i;
1880 u32 ctrl;
1881
1882 pos = pdev->rebar_cap;
1883 if (!pos)
1884 return;
1885
1886 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
1887 nbars = FIELD_GET(PCI_REBAR_CTRL_NBAR_MASK, ctrl);
1888
1889 for (i = 0; i < nbars; i++, pos += 8) {
1890 struct resource *res;
1891 int bar_idx, size;
1892
1893 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
1894 bar_idx = ctrl & PCI_REBAR_CTRL_BAR_IDX;
1895 res = pci_resource_n(pdev, bar_idx);
1896 size = pci_rebar_bytes_to_size(resource_size(res));
1897 ctrl &= ~PCI_REBAR_CTRL_BAR_SIZE;
1898 ctrl |= FIELD_PREP(PCI_REBAR_CTRL_BAR_SIZE, size);
1899 pci_write_config_dword(pdev, pos + PCI_REBAR_CTRL, ctrl);
1900 }
1901 }
1902
1903 /**
1904 * pci_restore_state - Restore the saved state of a PCI device
1905 * @dev: PCI device that we're dealing with
1906 */
pci_restore_state(struct pci_dev * dev)1907 void pci_restore_state(struct pci_dev *dev)
1908 {
1909 if (!dev->state_saved)
1910 return;
1911
1912 pci_restore_pcie_state(dev);
1913 pci_restore_pasid_state(dev);
1914 pci_restore_pri_state(dev);
1915 pci_restore_ats_state(dev);
1916 pci_restore_vc_state(dev);
1917 pci_restore_rebar_state(dev);
1918 pci_restore_dpc_state(dev);
1919 pci_restore_ptm_state(dev);
1920 pci_restore_tph_state(dev);
1921
1922 pci_aer_clear_status(dev);
1923 pci_restore_aer_state(dev);
1924
1925 pci_restore_config_space(dev);
1926
1927 pci_restore_pcix_state(dev);
1928 pci_restore_msi_state(dev);
1929
1930 /* Restore ACS and IOV configuration state */
1931 pci_enable_acs(dev);
1932 pci_restore_iov_state(dev);
1933
1934 dev->state_saved = false;
1935 }
1936 EXPORT_SYMBOL(pci_restore_state);
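
/*
 * Example (illustrative sketch, not part of the kernel proper): a
 * hypothetical driver saving config space before dropping to D3hot and
 * replaying it on the way back up. The foo_* names are invented; modern
 * drivers usually let the PCI core do this from its own PM callbacks.
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct pci_dev *pdev = to_pci_dev(dev);
 *
 *		pci_save_state(pdev);		// snapshot config space
 *		return pci_set_power_state(pdev, PCI_D3hot);
 *	}
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		struct pci_dev *pdev = to_pci_dev(dev);
 *		int ret = pci_set_power_state(pdev, PCI_D0);
 *
 *		if (ret)
 *			return ret;
 *		pci_restore_state(pdev);	// replay the snapshot
 *		return 0;
 *	}
 */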

struct pci_saved_state {
	u32 config_space[16];
	struct pci_cap_saved_data cap[];
};

/**
 * pci_store_saved_state - Allocate and return an opaque struct containing
 *			   the device saved state.
 * @dev: PCI device that we're dealing with
 *
 * Return NULL if no state or error.
 */
struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev)
{
	struct pci_saved_state *state;
	struct pci_cap_saved_state *tmp;
	struct pci_cap_saved_data *cap;
	size_t size;

	if (!dev->state_saved)
		return NULL;

	size = sizeof(*state) + sizeof(struct pci_cap_saved_data);

	hlist_for_each_entry(tmp, &dev->saved_cap_space, next)
		size += sizeof(struct pci_cap_saved_data) + tmp->cap.size;

	state = kzalloc(size, GFP_KERNEL);
	if (!state)
		return NULL;

	memcpy(state->config_space, dev->saved_config_space,
	       sizeof(state->config_space));

	cap = state->cap;
	hlist_for_each_entry(tmp, &dev->saved_cap_space, next) {
		size_t len = sizeof(struct pci_cap_saved_data) + tmp->cap.size;
		memcpy(cap, &tmp->cap, len);
		cap = (struct pci_cap_saved_data *)((u8 *)cap + len);
	}
	/* Empty cap_save terminates list */

	return state;
}
EXPORT_SYMBOL_GPL(pci_store_saved_state);

/**
 * pci_load_saved_state - Reload the provided save state into struct pci_dev.
 * @dev: PCI device that we're dealing with
 * @state: Saved state returned from pci_store_saved_state()
 */
int pci_load_saved_state(struct pci_dev *dev,
			 struct pci_saved_state *state)
{
	struct pci_cap_saved_data *cap;

	dev->state_saved = false;

	if (!state)
		return 0;

	memcpy(dev->saved_config_space, state->config_space,
	       sizeof(state->config_space));

	cap = state->cap;
	while (cap->size) {
		struct pci_cap_saved_state *tmp;

		tmp = _pci_find_saved_cap(dev, cap->cap_nr, cap->cap_extended);
		if (!tmp || tmp->cap.size != cap->size)
			return -EINVAL;

		memcpy(tmp->cap.data, cap->data, tmp->cap.size);
		cap = (struct pci_cap_saved_data *)((u8 *)cap +
		       sizeof(struct pci_cap_saved_data) + cap->size);
	}

	dev->state_saved = true;
	return 0;
}
EXPORT_SYMBOL_GPL(pci_load_saved_state);

/**
 * pci_load_and_free_saved_state - Reload the save state pointed to by state,
 *				   and free the memory allocated for it.
 * @dev: PCI device that we're dealing with
 * @state: Pointer to saved state returned from pci_store_saved_state()
 */
int pci_load_and_free_saved_state(struct pci_dev *dev,
				  struct pci_saved_state **state)
{
	int ret = pci_load_saved_state(dev, *state);
	kfree(*state);
	*state = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(pci_load_and_free_saved_state);
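
/*
 * Example (illustrative sketch): snapshotting device state across an
 * operation that clobbers config space, such as a function reset issued
 * on behalf of a guest. The foo_* wrapper is hypothetical; the pattern
 * loosely mirrors what VFIO-style users of these helpers do.
 *
 *	static int foo_reset_with_state(struct pci_dev *pdev)
 *	{
 *		struct pci_saved_state *state;
 *		int ret;
 *
 *		pci_save_state(pdev);			// fill saved buffers
 *		state = pci_store_saved_state(pdev);	// detach a copy
 *
 *		ret = pci_try_reset_function(pdev);	// clobbers config space
 *
 *		// Reload the snapshot and write it back to the device.
 *		pci_load_and_free_saved_state(pdev, &state);
 *		pci_restore_state(pdev);
 *		return ret;
 *	}
 */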

int __weak pcibios_enable_device(struct pci_dev *dev, int bars)
{
	return pci_enable_resources(dev, bars);
}

static int pci_host_bridge_enable_device(struct pci_dev *dev)
{
	struct pci_host_bridge *host_bridge = pci_find_host_bridge(dev->bus);
	int err;

	if (host_bridge && host_bridge->enable_device) {
		err = host_bridge->enable_device(host_bridge, dev);
		if (err)
			return err;
	}

	return 0;
}

static void pci_host_bridge_disable_device(struct pci_dev *dev)
{
	struct pci_host_bridge *host_bridge = pci_find_host_bridge(dev->bus);

	if (host_bridge && host_bridge->disable_device)
		host_bridge->disable_device(host_bridge, dev);
}

static int do_pci_enable_device(struct pci_dev *dev, int bars)
{
	int err;
	struct pci_dev *bridge;
	u16 cmd;
	u8 pin;

	err = pci_set_power_state(dev, PCI_D0);
	if (err < 0 && err != -EIO)
		return err;

	bridge = pci_upstream_bridge(dev);
	if (bridge)
		pcie_aspm_powersave_config_link(bridge);

	err = pci_host_bridge_enable_device(dev);
	if (err)
		return err;

	err = pcibios_enable_device(dev, bars);
	if (err < 0)
		goto err_enable;
	pci_fixup_device(pci_fixup_enable, dev);

	if (dev->msi_enabled || dev->msix_enabled)
		return 0;

	pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
	if (pin) {
		pci_read_config_word(dev, PCI_COMMAND, &cmd);
		if (cmd & PCI_COMMAND_INTX_DISABLE)
			pci_write_config_word(dev, PCI_COMMAND,
					      cmd & ~PCI_COMMAND_INTX_DISABLE);
	}

	return 0;

err_enable:
	pci_host_bridge_disable_device(dev);

	return err;
}

/**
 * pci_reenable_device - Resume abandoned device
 * @dev: PCI device to be resumed
 *
 * NOTE: This function is a backend of pci_default_resume() and is not
 * supposed to be called by normal code; write a proper resume handler and
 * use that instead.
 */
int pci_reenable_device(struct pci_dev *dev)
{
	if (pci_is_enabled(dev))
		return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1);
	return 0;
}
EXPORT_SYMBOL(pci_reenable_device);

static void pci_enable_bridge(struct pci_dev *dev)
{
	struct pci_dev *bridge;
	int retval;

	bridge = pci_upstream_bridge(dev);
	if (bridge)
		pci_enable_bridge(bridge);

	if (pci_is_enabled(dev)) {
		if (!dev->is_busmaster)
			pci_set_master(dev);
		return;
	}

	retval = pci_enable_device(dev);
	if (retval)
		pci_err(dev, "Error enabling bridge (%d), continuing\n",
			retval);
	pci_set_master(dev);
}

static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags)
{
	struct pci_dev *bridge;
	int err;
	int i, bars = 0;

	/*
	 * Power state could be unknown at this point, either due to a fresh
	 * boot or a device removal call. So get the current power state
	 * so that things like MSI message writing will behave as expected
	 * (e.g. if the device really is in D0 at enable time).
	 */
	pci_update_current_state(dev, dev->current_state);

	if (atomic_inc_return(&dev->enable_cnt) > 1)
		return 0;		/* already enabled */

	bridge = pci_upstream_bridge(dev);
	if (bridge)
		pci_enable_bridge(bridge);

	/* Collect all matching resources, skipping only the SR-IOV ranges */
	for (i = 0; i <= PCI_ROM_RESOURCE; i++)
		if (dev->resource[i].flags & flags)
			bars |= (1 << i);
	for (i = PCI_BRIDGE_RESOURCES; i < DEVICE_COUNT_RESOURCE; i++)
		if (dev->resource[i].flags & flags)
			bars |= (1 << i);

	err = do_pci_enable_device(dev, bars);
	if (err < 0)
		atomic_dec(&dev->enable_cnt);
	return err;
}

/**
 * pci_enable_device_mem - Initialize a device for use with Memory space
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable Memory resources. Wake up the device if it was suspended.
 * Beware, this function can fail.
 */
int pci_enable_device_mem(struct pci_dev *dev)
{
	return pci_enable_device_flags(dev, IORESOURCE_MEM);
}
EXPORT_SYMBOL(pci_enable_device_mem);

/**
 * pci_enable_device - Initialize device before it's used by a driver.
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable I/O and memory. Wake up the device if it was suspended.
 * Beware, this function can fail.
 *
 * Note we don't actually enable the device many times if we call
 * this function repeatedly (we just increment the count).
 */
int pci_enable_device(struct pci_dev *dev)
{
	return pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO);
}
EXPORT_SYMBOL(pci_enable_device);

/*
 * pcibios_device_add - provide arch specific hooks when adding device dev
 * @dev: the PCI device being added
 *
 * Permits the platform to provide architecture specific functionality when
 * devices are added. This is the default implementation. Architecture
 * implementations can override this.
 */
int __weak pcibios_device_add(struct pci_dev *dev)
{
	return 0;
}

/**
 * pcibios_release_device - provide arch specific hooks when releasing
 *			    device dev
 * @dev: the PCI device being released
 *
 * Permits the platform to provide architecture specific functionality when
 * devices are released. This is the default implementation. Architecture
 * implementations can override this.
 */
void __weak pcibios_release_device(struct pci_dev *dev) {}

/**
 * pcibios_disable_device - disable arch specific PCI resources for device dev
 * @dev: the PCI device to disable
 *
 * Disables architecture specific PCI resources for the device. This
 * is the default implementation. Architecture implementations can
 * override this.
 */
void __weak pcibios_disable_device(struct pci_dev *dev) {}

static void do_pci_disable_device(struct pci_dev *dev)
{
	u16 pci_command;

	pci_read_config_word(dev, PCI_COMMAND, &pci_command);
	if (pci_command & PCI_COMMAND_MASTER) {
		pci_command &= ~PCI_COMMAND_MASTER;
		pci_write_config_word(dev, PCI_COMMAND, pci_command);
	}

	pcibios_disable_device(dev);
}

/**
 * pci_disable_enabled_device - Disable device without updating enable_cnt
 * @dev: PCI device to disable
 *
 * NOTE: This function is a backend of PCI power management routines and is
 * not supposed to be called by drivers.
 */
void pci_disable_enabled_device(struct pci_dev *dev)
{
	if (pci_is_enabled(dev))
		do_pci_disable_device(dev);
}

/**
 * pci_disable_device - Disable PCI device after use
 * @dev: PCI device to be disabled
 *
 * Signal to the system that the PCI device is not in use by the system
 * anymore. This only involves disabling PCI bus-mastering, if active.
 *
 * Note we don't actually disable the device until all callers of
 * pci_enable_device() have called pci_disable_device().
 */
void pci_disable_device(struct pci_dev *dev)
{
	dev_WARN_ONCE(&dev->dev, atomic_read(&dev->enable_cnt) <= 0,
		      "disabling already-disabled device");

	if (atomic_dec_return(&dev->enable_cnt) != 0)
		return;

	pci_host_bridge_disable_device(dev);

	do_pci_disable_device(dev);

	dev->is_busmaster = 0;
}
EXPORT_SYMBOL(pci_disable_device);
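
/*
 * Example (illustrative sketch): balanced use of pci_enable_device() and
 * pci_disable_device() in a hypothetical driver's probe/remove pair. The
 * foo_* names are invented for illustration.
 *
 *	static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 *	{
 *		int ret;
 *
 *		ret = pci_enable_device(pdev);	// bumps enable_cnt
 *		if (ret)
 *			return ret;
 *
 *		ret = pci_request_regions(pdev, "foo");
 *		if (ret)
 *			goto err_disable;
 *
 *		pci_set_master(pdev);		// enable bus mastering
 *		return 0;
 *
 *	err_disable:
 *		pci_disable_device(pdev);	// drops enable_cnt again
 *		return ret;
 *	}
 *
 *	static void foo_remove(struct pci_dev *pdev)
 *	{
 *		pci_release_regions(pdev);
 *		pci_disable_device(pdev);
 *	}
 */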

/**
 * pcibios_set_pcie_reset_state - set reset state for device dev
 * @dev: the PCIe device to reset
 * @state: Reset state to enter into
 *
 * Set the PCIe reset state for the device. This is the default
 * implementation. Architecture implementations can override this.
 */
int __weak pcibios_set_pcie_reset_state(struct pci_dev *dev,
					enum pcie_reset_state state)
{
	return -EINVAL;
}

/**
 * pci_set_pcie_reset_state - set reset state for device dev
 * @dev: the PCIe device to reset
 * @state: Reset state to enter into
 *
 * Sets the PCI reset state for the device.
 */
int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
{
	return pcibios_set_pcie_reset_state(dev, state);
}
EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);

#ifdef CONFIG_PCIEAER
void pcie_clear_device_status(struct pci_dev *dev)
{
	u16 sta;

	pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &sta);
	pcie_capability_write_word(dev, PCI_EXP_DEVSTA, sta);
}
#endif

/**
 * pcie_clear_root_pme_status - Clear root port PME interrupt status.
 * @dev: PCIe root port or event collector.
 */
void pcie_clear_root_pme_status(struct pci_dev *dev)
{
	pcie_capability_set_dword(dev, PCI_EXP_RTSTA, PCI_EXP_RTSTA_PME);
}

/**
 * pci_check_pme_status - Check if given device has generated PME.
 * @dev: Device to check.
 *
 * Check the PME status of the device and if set, clear it and clear PME enable
 * (if set). Return 'true' if PME status and PME enable were both set or
 * 'false' otherwise.
 */
bool pci_check_pme_status(struct pci_dev *dev)
{
	int pmcsr_pos;
	u16 pmcsr;
	bool ret = false;

	if (!dev->pm_cap)
		return false;

	pmcsr_pos = dev->pm_cap + PCI_PM_CTRL;
	pci_read_config_word(dev, pmcsr_pos, &pmcsr);
	if (!(pmcsr & PCI_PM_CTRL_PME_STATUS))
		return false;

	/* Clear PME status. */
	pmcsr |= PCI_PM_CTRL_PME_STATUS;
	if (pmcsr & PCI_PM_CTRL_PME_ENABLE) {
		/* Disable PME to avoid interrupt flood. */
		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
		ret = true;
	}

	pci_write_config_word(dev, pmcsr_pos, pmcsr);

	return ret;
}

/**
 * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set.
 * @dev: Device to handle.
 * @pme_poll_reset: Whether or not to reset the device's pme_poll flag.
 *
 * Check if @dev has generated PME and queue a resume request for it in that
 * case.
 */
static int pci_pme_wakeup(struct pci_dev *dev, void *pme_poll_reset)
{
	if (pme_poll_reset && dev->pme_poll)
		dev->pme_poll = false;

	if (pci_check_pme_status(dev)) {
		pci_wakeup_event(dev);
		pm_request_resume(&dev->dev);
	}
	return 0;
}

/**
 * pci_pme_wakeup_bus - Walk given bus and wake up devices on it, if necessary.
 * @bus: Top bus of the subtree to walk.
 */
void pci_pme_wakeup_bus(struct pci_bus *bus)
{
	if (bus)
		pci_walk_bus(bus, pci_pme_wakeup, (void *)true);
}

/**
 * pci_pme_capable - check the capability of PCI device to generate PME#
 * @dev: PCI device to handle.
 * @state: PCI state from which device will issue PME#.
 */
bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
{
	if (!dev->pm_cap)
		return false;

	return !!(dev->pme_support & (1 << state));
}
EXPORT_SYMBOL(pci_pme_capable);
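
/*
 * Example (illustrative sketch): probing the deepest low-power state a
 * device can signal PME# from, similar in spirit to what pci_target_state()
 * below does internally. The foo_* name is hypothetical and drivers rarely
 * need to open-code this.
 *
 *	static pci_power_t foo_deepest_pme_state(struct pci_dev *pdev)
 *	{
 *		pci_power_t state = PCI_D3hot;
 *
 *		// Walk from D3hot toward D0 until PME# is supported.
 *		while (state != PCI_D0 && !pci_pme_capable(pdev, state))
 *			state--;
 *		return state;
 *	}
 */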

static void pci_pme_list_scan(struct work_struct *work)
{
	struct pci_pme_device *pme_dev, *n;

	mutex_lock(&pci_pme_list_mutex);
	list_for_each_entry_safe(pme_dev, n, &pci_pme_list, list) {
		struct pci_dev *pdev = pme_dev->dev;

		if (pdev->pme_poll) {
			struct pci_dev *bridge = pdev->bus->self;
			struct device *dev = &pdev->dev;
			struct device *bdev = bridge ? &bridge->dev : NULL;
			int bref = 0;

			/*
			 * If we have a bridge, it should be in an active/D0
			 * state or the configuration space of subordinate
			 * devices may not be accessible or stable over the
			 * course of the call.
			 */
			if (bdev) {
				bref = pm_runtime_get_if_active(bdev);
				if (!bref)
					continue;

				if (bridge->current_state != PCI_D0)
					goto put_bridge;
			}

			/*
			 * The device itself should be suspended but config
			 * space must be accessible, therefore it cannot be in
			 * D3cold.
			 */
			if (pm_runtime_suspended(dev) &&
			    pdev->current_state != PCI_D3cold)
				pci_pme_wakeup(pdev, NULL);

put_bridge:
			if (bref > 0)
				pm_runtime_put(bdev);
		} else {
			list_del(&pme_dev->list);
			kfree(pme_dev);
		}
	}
	if (!list_empty(&pci_pme_list))
		queue_delayed_work(system_freezable_wq, &pci_pme_work,
				   msecs_to_jiffies(PME_TIMEOUT));
	mutex_unlock(&pci_pme_list_mutex);
}

static void __pci_pme_active(struct pci_dev *dev, bool enable)
{
	u16 pmcsr;

	if (!dev->pme_support)
		return;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	/* Clear PME_Status by writing 1 to it and enable PME# */
	pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;
	if (!enable)
		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;

	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
}

/**
 * pci_pme_restore - Restore PME configuration after config space restore.
 * @dev: PCI device to update.
 */
void pci_pme_restore(struct pci_dev *dev)
{
	u16 pmcsr;

	if (!dev->pme_support)
		return;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	if (dev->wakeup_prepared) {
		pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		pmcsr &= ~PCI_PM_CTRL_PME_STATUS;
	} else {
		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
		pmcsr |= PCI_PM_CTRL_PME_STATUS;
	}
	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
}

/**
 * pci_pme_active - enable or disable PCI device's PME# function
 * @dev: PCI device to handle.
 * @enable: 'true' to enable PME# generation; 'false' to disable it.
 *
 * The caller must verify that the device is capable of generating PME# before
 * calling this function with @enable equal to 'true'.
 */
void pci_pme_active(struct pci_dev *dev, bool enable)
{
	__pci_pme_active(dev, enable);

	/*
	 * PCI (as opposed to PCIe) PME requires that the device have
	 * its PME# line hooked up correctly. Not all hardware vendors
	 * do this, so the PME never gets delivered and the device
	 * remains asleep. The easiest way around this is to
	 * periodically walk the list of suspended devices and check
	 * whether any have their PME flag set. The assumption is that
	 * we'll wake up often enough anyway that this won't be a huge
	 * hit, and the power savings from the devices will still be a
	 * win.
	 *
	 * Although PCIe uses in-band PME message instead of PME# line
	 * to report PME, PME does not work for some PCIe devices in
	 * reality. For example, there are devices that set their PME
	 * status bits, but don't really bother to send a PME message;
	 * there are PCI Express Root Ports that don't bother to
	 * trigger interrupts when they receive PME messages from the
	 * devices below. So PME poll is used for PCIe devices too.
	 */

	if (dev->pme_poll) {
		struct pci_pme_device *pme_dev;

		if (enable) {
			pme_dev = kmalloc(sizeof(struct pci_pme_device),
					  GFP_KERNEL);
			if (!pme_dev) {
				pci_warn(dev, "can't enable PME#\n");
				return;
			}
			pme_dev->dev = dev;
			mutex_lock(&pci_pme_list_mutex);
			list_add(&pme_dev->list, &pci_pme_list);
			if (list_is_singular(&pci_pme_list))
				queue_delayed_work(system_freezable_wq,
						   &pci_pme_work,
						   msecs_to_jiffies(PME_TIMEOUT));
			mutex_unlock(&pci_pme_list_mutex);
		} else {
			mutex_lock(&pci_pme_list_mutex);
			list_for_each_entry(pme_dev, &pci_pme_list, list) {
				if (pme_dev->dev == dev) {
					list_del(&pme_dev->list);
					kfree(pme_dev);
					break;
				}
			}
			mutex_unlock(&pci_pme_list_mutex);
		}
	}

	pci_dbg(dev, "PME# %s\n", enable ? "enabled" : "disabled");
}
EXPORT_SYMBOL(pci_pme_active);

/**
 * __pci_enable_wake - enable PCI device as wakeup event source
 * @dev: PCI device affected
 * @state: PCI state from which device will issue wakeup events
 * @enable: True to enable event generation; false to disable
 *
 * This enables the device as a wakeup event source, or disables it.
 * When such events involve platform-specific hooks, those hooks are
 * called automatically by this routine.
 *
 * Devices with legacy power management (no standard PCI PM capabilities)
 * always require such platform hooks.
 *
 * RETURN VALUE:
 * 0 is returned on success
 * -EINVAL is returned if device is not supposed to wake up the system
 * Error code depending on the platform is returned if both the platform and
 * the native mechanism fail to enable the generation of wake-up events
 */
static int __pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable)
{
	int ret = 0;

	/*
	 * Bridges that are not power-manageable directly only signal
	 * wakeup on behalf of subordinate devices which is set up
	 * elsewhere, so skip them. However, bridges that are
	 * power-manageable may signal wakeup for themselves (for example,
	 * on a hotplug event) and they need to be covered here.
	 */
	if (!pci_power_manageable(dev))
		return 0;

	/* Don't do the same thing twice in a row for one device. */
	if (!!enable == !!dev->wakeup_prepared)
		return 0;

	/*
	 * According to "PCI System Architecture" 4th ed. by Tom Shanley & Don
	 * Anderson we should be doing PME# wake enable followed by ACPI wake
	 * enable. To disable wake-up we call the platform first, for symmetry.
	 */

	if (enable) {
		int error;

		/*
		 * Enable PME signaling if the device can signal PME from
		 * D3cold regardless of whether or not it can signal PME from
		 * the current target state, because that will allow it to
		 * signal PME when the hierarchy above it goes into D3cold and
		 * the device itself ends up in D3cold as a result of that.
		 */
		if (pci_pme_capable(dev, state) || pci_pme_capable(dev, PCI_D3cold))
			pci_pme_active(dev, true);
		else
			ret = 1;
		error = platform_pci_set_wakeup(dev, true);
		if (ret)
			ret = error;
		if (!ret)
			dev->wakeup_prepared = true;
	} else {
		platform_pci_set_wakeup(dev, false);
		pci_pme_active(dev, false);
		dev->wakeup_prepared = false;
	}

	return ret;
}

/**
 * pci_enable_wake - change wakeup settings for a PCI device
 * @pci_dev: Target device
 * @state: PCI state from which device will issue wakeup events
 * @enable: Whether or not to enable event generation
 *
 * If @enable is set, check device_may_wakeup() for the device before calling
 * __pci_enable_wake() for it.
 */
int pci_enable_wake(struct pci_dev *pci_dev, pci_power_t state, bool enable)
{
	if (enable && !device_may_wakeup(&pci_dev->dev))
		return -EINVAL;

	return __pci_enable_wake(pci_dev, state, enable);
}
EXPORT_SYMBOL(pci_enable_wake);
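
/*
 * Example (illustrative sketch): arming wake-up from a hypothetical
 * driver's suspend path. Whether wake-up is armed follows the user's
 * sysfs "wakeup" policy via device_may_wakeup(); foo_stop_io() is an
 * invented quiesce helper.
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct pci_dev *pdev = to_pci_dev(dev);
 *
 *		foo_stop_io(pdev);	// hypothetical device quiesce
 *
 *		// Arm PME/platform wake-up only if user policy allows it.
 *		pci_enable_wake(pdev, PCI_D3hot,
 *				device_may_wakeup(&pdev->dev));
 *		return 0;
 *	}
 */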

/**
 * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold
 * @dev: PCI device to prepare
 * @enable: True to enable wake-up event generation; false to disable
 *
 * Many drivers want the device to wake up the system from D3_hot or D3_cold
 * and this function allows them to set that up cleanly - pci_enable_wake()
 * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI
 * ordering constraints.
 *
 * This function only returns error code if the device is not allowed to wake
 * up the system from sleep or it is not capable of generating PME# from both
 * D3_hot and D3_cold and the platform is unable to enable wake-up power for it.
 */
int pci_wake_from_d3(struct pci_dev *dev, bool enable)
{
	return pci_pme_capable(dev, PCI_D3cold) ?
			pci_enable_wake(dev, PCI_D3cold, enable) :
			pci_enable_wake(dev, PCI_D3hot, enable);
}
EXPORT_SYMBOL(pci_wake_from_d3);

/**
 * pci_target_state - find an appropriate low power state for a given PCI dev
 * @dev: PCI device
 * @wakeup: Whether or not wakeup functionality will be enabled for the device.
 *
 * Use underlying platform code to find a supported low power state for @dev.
 * If the platform can't manage @dev, return the deepest state from which it
 * can generate wake events, based on any available PME info.
 */
static pci_power_t pci_target_state(struct pci_dev *dev, bool wakeup)
{
	if (platform_pci_power_manageable(dev)) {
		/*
		 * Call the platform to find the target state for the device.
		 */
		pci_power_t state = platform_pci_choose_state(dev);

		switch (state) {
		case PCI_POWER_ERROR:
		case PCI_UNKNOWN:
			return PCI_D3hot;

		case PCI_D1:
		case PCI_D2:
			if (pci_no_d1d2(dev))
				return PCI_D3hot;
		}

		return state;
	}

	/*
	 * If the device is in D3cold even though it's not power-manageable by
	 * the platform, it may have been powered down by non-standard means.
	 * Best to let it slumber.
	 */
	if (dev->current_state == PCI_D3cold)
		return PCI_D3cold;
	else if (!dev->pm_cap)
		return PCI_D0;

	if (wakeup && dev->pme_support) {
		pci_power_t state = PCI_D3hot;

		/*
		 * Find the deepest state from which the device can generate
		 * PME#.
		 */
		while (state && !(dev->pme_support & (1 << state)))
			state--;

		if (state)
			return state;
		else if (dev->pme_support & 1)
			return PCI_D0;
	}

	return PCI_D3hot;
}

/**
 * pci_prepare_to_sleep - prepare PCI device for system-wide transition
 *			  into a sleep state
 * @dev: Device to handle.
 *
 * Choose the power state appropriate for the device depending on whether
 * it can wake up the system and/or is power manageable by the platform
 * (PCI_D3hot is the default) and put the device into that state.
 */
int pci_prepare_to_sleep(struct pci_dev *dev)
{
	bool wakeup = device_may_wakeup(&dev->dev);
	pci_power_t target_state = pci_target_state(dev, wakeup);
	int error;

	if (target_state == PCI_POWER_ERROR)
		return -EIO;

	pci_enable_wake(dev, target_state, wakeup);

	error = pci_set_power_state(dev, target_state);

	if (error)
		pci_enable_wake(dev, target_state, false);

	return error;
}
EXPORT_SYMBOL(pci_prepare_to_sleep);
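
/*
 * Example (illustrative sketch): a suspend/resume pair that delegates the
 * wake-up arming and D-state choice to the two helpers above instead of
 * open-coding them. The foo_* names are hypothetical.
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct pci_dev *pdev = to_pci_dev(dev);
 *
 *		pci_save_state(pdev);
 *		return pci_prepare_to_sleep(pdev);	// wake + D-state
 *	}
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		struct pci_dev *pdev = to_pci_dev(dev);
 *		int ret = pci_back_from_sleep(pdev);	// D0, wake off
 *
 *		if (!ret)
 *			pci_restore_state(pdev);
 *		return ret;
 *	}
 */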

/**
 * pci_back_from_sleep - turn PCI device on during system-wide transition
 *			 into working state
 * @dev: Device to handle.
 *
 * Disable device's system wake-up capability and put it into D0.
 */
int pci_back_from_sleep(struct pci_dev *dev)
{
	int ret = pci_set_power_state(dev, PCI_D0);

	if (ret)
		return ret;

	pci_enable_wake(dev, PCI_D0, false);
	return 0;
}
EXPORT_SYMBOL(pci_back_from_sleep);

/**
 * pci_finish_runtime_suspend - Carry out PCI-specific part of runtime suspend.
 * @dev: PCI device being suspended.
 *
 * Prepare @dev to generate wake-up events at run time and put it into a low
 * power state.
 */
int pci_finish_runtime_suspend(struct pci_dev *dev)
{
	pci_power_t target_state;
	int error;

	target_state = pci_target_state(dev, device_can_wakeup(&dev->dev));
	if (target_state == PCI_POWER_ERROR)
		return -EIO;

	__pci_enable_wake(dev, target_state, pci_dev_run_wake(dev));

	error = pci_set_power_state(dev, target_state);

	if (error)
		pci_enable_wake(dev, target_state, false);

	return error;
}

/**
 * pci_dev_run_wake - Check if device can generate run-time wake-up events.
 * @dev: Device to check.
 *
 * Return true if the device itself is capable of generating wake-up events
 * (through the platform or using the native PCIe PME) or if the device supports
 * PME and one of its upstream bridges can generate wake-up events.
 */
bool pci_dev_run_wake(struct pci_dev *dev)
{
	struct pci_bus *bus = dev->bus;

	if (!dev->pme_support)
		return false;

	/* PME-capable in principle, but not from the target power state */
	if (!pci_pme_capable(dev, pci_target_state(dev, true)))
		return false;

	if (device_can_wakeup(&dev->dev))
		return true;

	while (bus->parent) {
		struct pci_dev *bridge = bus->self;

		if (device_can_wakeup(&bridge->dev))
			return true;

		bus = bus->parent;
	}

	/* We have reached the root bus. */
	if (bus->bridge)
		return device_can_wakeup(bus->bridge);

	return false;
}
EXPORT_SYMBOL_GPL(pci_dev_run_wake);

/**
 * pci_dev_need_resume - Check if it is necessary to resume the device.
 * @pci_dev: Device to check.
 *
 * Return 'true' if the device is not runtime-suspended, if it has to be
 * reconfigured due to a difference in wakeup settings between system and
 * runtime suspend, or if its current power state is not suitable for the
 * upcoming (system-wide) transition.
 */
bool pci_dev_need_resume(struct pci_dev *pci_dev)
{
	struct device *dev = &pci_dev->dev;
	pci_power_t target_state;

	if (!pm_runtime_suspended(dev) || platform_pci_need_resume(pci_dev))
		return true;

	target_state = pci_target_state(pci_dev, device_may_wakeup(dev));

	/*
	 * If the earlier platform check has not triggered, D3cold is just
	 * power removal on top of D3hot, so no need to resume the device in
	 * that case.
	 */
	return target_state != pci_dev->current_state &&
	       target_state != PCI_D3cold &&
	       pci_dev->current_state != PCI_D3hot;
}

/**
 * pci_dev_adjust_pme - Adjust PME setting for a suspended device.
 * @pci_dev: Device to check.
 *
 * If the device is suspended and it is not configured for system wakeup,
 * disable PME for it to prevent it from waking up the system unnecessarily.
 *
 * Note that if the device's power state is D3cold and the platform check in
 * pci_dev_need_resume() has not triggered, the device's configuration need not
 * be changed.
 */
void pci_dev_adjust_pme(struct pci_dev *pci_dev)
{
	struct device *dev = &pci_dev->dev;

	spin_lock_irq(&dev->power.lock);

	if (pm_runtime_suspended(dev) && !device_may_wakeup(dev) &&
	    pci_dev->current_state < PCI_D3cold)
		__pci_pme_active(pci_dev, false);

	spin_unlock_irq(&dev->power.lock);
}

/**
 * pci_dev_complete_resume - Finalize resume from system sleep for a device.
 * @pci_dev: Device to handle.
 *
 * If the device is runtime suspended and wakeup-capable, enable PME for it as
 * it might have been disabled during the prepare phase of system suspend if
 * the device was not configured for system wakeup.
 */
void pci_dev_complete_resume(struct pci_dev *pci_dev)
{
	struct device *dev = &pci_dev->dev;

	if (!pci_dev_run_wake(pci_dev))
		return;

	spin_lock_irq(&dev->power.lock);

	if (pm_runtime_suspended(dev) && pci_dev->current_state < PCI_D3cold)
		__pci_pme_active(pci_dev, true);

	spin_unlock_irq(&dev->power.lock);
}

/**
 * pci_choose_state - Choose the power state of a PCI device.
 * @dev: Target PCI device.
 * @state: Target state for the whole system.
 *
 * Returns PCI power state suitable for @dev and @state.
 */
pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
{
	if (state.event == PM_EVENT_ON)
		return PCI_D0;

	return pci_target_state(dev, false);
}
EXPORT_SYMBOL(pci_choose_state);
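
/*
 * Example (illustrative sketch): the classic legacy .suspend hook pattern
 * that pci_choose_state() serves, mapping the system-wide pm_message_t to
 * a device D-state. The foo_* name is hypothetical.
 *
 *	static int foo_legacy_suspend(struct pci_dev *pdev, pm_message_t msg)
 *	{
 *		pci_save_state(pdev);
 *		pci_disable_device(pdev);
 *		return pci_set_power_state(pdev, pci_choose_state(pdev, msg));
 *	}
 */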

void pci_config_pm_runtime_get(struct pci_dev *pdev)
{
	struct device *dev = &pdev->dev;
	struct device *parent = dev->parent;

	if (parent)
		pm_runtime_get_sync(parent);
	pm_runtime_get_noresume(dev);
	/*
	 * pdev->current_state is set to PCI_D3cold during suspending,
	 * so wait until suspending completes
	 */
	pm_runtime_barrier(dev);
	/*
	 * Only need to resume devices in D3cold, because config
	 * registers are still accessible for devices suspended but
	 * not in D3cold.
	 */
	if (pdev->current_state == PCI_D3cold)
		pm_runtime_resume(dev);
}

void pci_config_pm_runtime_put(struct pci_dev *pdev)
{
	struct device *dev = &pdev->dev;
	struct device *parent = dev->parent;

	pm_runtime_put(dev);
	if (parent)
		pm_runtime_put_sync(parent);
}

static const struct dmi_system_id bridge_d3_blacklist[] = {
#ifdef CONFIG_X86
	{
		/*
		 * Gigabyte X299 root port is not marked as hotplug capable
		 * which allows Linux to power manage it. However, this
		 * confuses the BIOS SMI handler so don't power manage root
		 * ports on that system.
		 */
		.ident = "X299 DESIGNARE EX-CF",
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
			DMI_MATCH(DMI_BOARD_NAME, "X299 DESIGNARE EX-CF"),
		},
	},
	{
		/*
		 * Downstream device is not accessible after putting a root port
		 * into D3cold and back into D0 on Elo Continental Z2 board
		 */
		.ident = "Elo Continental Z2",
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "Elo Touch Solutions"),
			DMI_MATCH(DMI_BOARD_NAME, "Geminilake"),
			DMI_MATCH(DMI_BOARD_VERSION, "Continental Z2"),
		},
	},
	{
		/*
		 * Changing the power state of the root port that the dGPU is
		 * connected to fails:
		 * https://gitlab.freedesktop.org/drm/amd/-/issues/3229
		 */
		.ident = "Hewlett-Packard HP Pavilion 17 Notebook PC/1972",
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
			DMI_MATCH(DMI_BOARD_NAME, "1972"),
			DMI_MATCH(DMI_BOARD_VERSION, "95.33"),
		},
	},
#endif
	{ }
};

/**
 * pci_bridge_d3_possible - Is it possible to put the bridge into D3
 * @bridge: Bridge to check
 *
 * This function checks if it is possible to move the bridge to D3.
 * Currently we only allow D3 for some PCIe ports and for Thunderbolt.
 */
bool pci_bridge_d3_possible(struct pci_dev *bridge)
{
	if (!pci_is_pcie(bridge))
		return false;

	switch (pci_pcie_type(bridge)) {
	case PCI_EXP_TYPE_ROOT_PORT:
	case PCI_EXP_TYPE_UPSTREAM:
	case PCI_EXP_TYPE_DOWNSTREAM:
		if (pci_bridge_d3_disable)
			return false;

		/*
		 * Hotplug ports handled by firmware in System Management Mode
		 * may not be put into D3 by the OS (Thunderbolt on non-Macs).
		 */
		if (bridge->is_hotplug_bridge && !pciehp_is_native(bridge))
			return false;

		if (pci_bridge_d3_force)
			return true;

		/* Even the oldest 2010 Thunderbolt controller supports D3. */
		if (bridge->is_thunderbolt)
			return true;

		/* Platform might know better if the bridge supports D3 */
		if (platform_pci_bridge_d3(bridge))
			return true;

		/*
		 * Hotplug ports handled natively by the OS were not validated
		 * by vendors for runtime D3 at least until 2018 because there
		 * was no OS support.
		 */
		if (bridge->is_hotplug_bridge)
			return false;

		if (dmi_check_system(bridge_d3_blacklist))
			return false;

		/*
		 * Out of caution, we only allow PCIe ports from 2015 or newer
		 * into D3 on x86.
		 */
		if (!IS_ENABLED(CONFIG_X86) || dmi_get_bios_year() >= 2015)
			return true;
		break;
	}

	return false;
}

static int pci_dev_check_d3cold(struct pci_dev *dev, void *data)
{
	bool *d3cold_ok = data;

	if (/* The device needs to be allowed to go D3cold ... */
	    dev->no_d3cold || !dev->d3cold_allowed ||

	    /* ... and if it is wakeup capable to do so from D3cold. */
	    (device_may_wakeup(&dev->dev) &&
	     !pci_pme_capable(dev, PCI_D3cold)) ||

	    /* If it is a bridge it must be allowed to go to D3. */
	    !pci_power_manageable(dev))

		*d3cold_ok = false;

	return !*d3cold_ok;
}

/*
 * pci_bridge_d3_update - Update bridge D3 capabilities
 * @dev: PCI device which is changed
 *
 * Update upstream bridge PM capabilities accordingly depending on whether
 * the device PM configuration was changed or the device is being removed.
 * The change is also propagated upstream.
 */
void pci_bridge_d3_update(struct pci_dev *dev)
{
	bool remove = !device_is_registered(&dev->dev);
	struct pci_dev *bridge;
	bool d3cold_ok = true;

	bridge = pci_upstream_bridge(dev);
	if (!bridge || !pci_bridge_d3_possible(bridge))
		return;

	/*
	 * If D3 is currently allowed for the bridge, removing one of its
	 * children won't change that.
	 */
	if (remove && bridge->bridge_d3)
		return;

	/*
	 * If D3 is currently allowed for the bridge and a child is added or
	 * changed, disallowance of D3 can only be caused by that child, so
	 * we only need to check that single device, not any of its siblings.
	 *
	 * If D3 is currently not allowed for the bridge, checking the device
	 * first may allow us to skip checking its siblings.
	 */
	if (!remove)
		pci_dev_check_d3cold(dev, &d3cold_ok);

	/*
	 * If D3 is currently not allowed for the bridge, this may be caused
	 * either by the device being changed/removed or any of its siblings,
	 * so we need to go through all children to find out if one of them
	 * continues to block D3.
	 */
	if (d3cold_ok && !bridge->bridge_d3)
		pci_walk_bus(bridge->subordinate, pci_dev_check_d3cold,
			     &d3cold_ok);

	if (bridge->bridge_d3 != d3cold_ok) {
		bridge->bridge_d3 = d3cold_ok;
		/* Propagate change to upstream bridges */
		pci_bridge_d3_update(bridge);
	}
}

/**
 * pci_d3cold_enable - Enable D3cold for device
 * @dev: PCI device to handle
 *
 * This function can be used in drivers to enable D3cold from the device
 * they handle. It also updates upstream PCI bridge PM capabilities
 * accordingly.
 */
void pci_d3cold_enable(struct pci_dev *dev)
{
	if (dev->no_d3cold) {
		dev->no_d3cold = false;
		pci_bridge_d3_update(dev);
	}
}
EXPORT_SYMBOL_GPL(pci_d3cold_enable);

/**
 * pci_d3cold_disable - Disable D3cold for device
 * @dev: PCI device to handle
 *
 * This function can be used in drivers to disable D3cold from the device
 * they handle. It also updates upstream PCI bridge PM capabilities
 * accordingly.
 */
void pci_d3cold_disable(struct pci_dev *dev)
{
	if (!dev->no_d3cold) {
		dev->no_d3cold = true;
		pci_bridge_d3_update(dev);
	}
}
EXPORT_SYMBOL_GPL(pci_d3cold_disable);
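
/*
 * Example (illustrative sketch): a hypothetical driver opting its device
 * out of D3cold at probe time, e.g. because the hardware loses context it
 * cannot recover after a full power removal. The foo_* names and the
 * quirk check are invented for illustration.
 *
 *	static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 *	{
 *		// Device resumes incorrectly after D3cold, so cap runtime
 *		// PM at D3hot for it and, transitively, its bridges.
 *		if (foo_is_broken_rev(pdev))	// hypothetical quirk check
 *			pci_d3cold_disable(pdev);
 *
 *		return 0;
 *	}
 */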

void pci_pm_power_up_and_verify_state(struct pci_dev *pci_dev)
{
	pci_power_up(pci_dev);
	pci_update_current_state(pci_dev, PCI_D0);
}

/**
 * pci_pm_init - Initialize PM functions of given PCI device
 * @dev: PCI device to handle.
 */
void pci_pm_init(struct pci_dev *dev)
{
	int pm;
	u16 status;
	u16 pmc;

	device_enable_async_suspend(&dev->dev);
	dev->wakeup_prepared = false;

	dev->pm_cap = 0;
	dev->pme_support = 0;

	/* find PCI PM capability in list */
	pm = pci_find_capability(dev, PCI_CAP_ID_PM);
	if (!pm)
		goto poweron;
	/* Check device's ability to generate PME# */
	pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);

	if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
		pci_err(dev, "unsupported PM cap regs version (%u)\n",
			pmc & PCI_PM_CAP_VER_MASK);
		goto poweron;
	}

	dev->pm_cap = pm;
	dev->d3hot_delay = PCI_PM_D3HOT_WAIT;
	dev->d3cold_delay = PCI_PM_D3COLD_WAIT;
	dev->bridge_d3 = pci_bridge_d3_possible(dev);
	dev->d3cold_allowed = true;

	dev->d1_support = false;
	dev->d2_support = false;
	if (!pci_no_d1d2(dev)) {
		if (pmc & PCI_PM_CAP_D1)
			dev->d1_support = true;
		if (pmc & PCI_PM_CAP_D2)
			dev->d2_support = true;

		if (dev->d1_support || dev->d2_support)
			pci_info(dev, "supports%s%s\n",
				 dev->d1_support ? " D1" : "",
				 dev->d2_support ? " D2" : "");
	}

	pmc &= PCI_PM_CAP_PME_MASK;
	if (pmc) {
		pci_info(dev, "PME# supported from%s%s%s%s%s\n",
			 (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
			 (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
			 (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
			 (pmc & PCI_PM_CAP_PME_D3hot) ? " D3hot" : "",
			 (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : "");
		dev->pme_support = FIELD_GET(PCI_PM_CAP_PME_MASK, pmc);
		dev->pme_poll = true;
		/*
		 * Make device's PM flags reflect the wake-up capability, but
		 * let the user space enable it to wake up the system as needed.
		 */
		device_set_wakeup_capable(&dev->dev, true);
		/* Disable the PME# generation functionality */
		pci_pme_active(dev, false);
	}

	pci_read_config_word(dev, PCI_STATUS, &status);
	if (status & PCI_STATUS_IMM_READY)
		dev->imm_ready = 1;
poweron:
	pci_pm_power_up_and_verify_state(dev);
	pm_runtime_forbid(&dev->dev);
	pm_runtime_set_active(&dev->dev);
	pm_runtime_enable(&dev->dev);
}

static unsigned long pci_ea_flags(struct pci_dev *dev, u8 prop)
{
	unsigned long flags = IORESOURCE_PCI_FIXED | IORESOURCE_PCI_EA_BEI;

	switch (prop) {
	case PCI_EA_P_MEM:
	case PCI_EA_P_VF_MEM:
		flags |= IORESOURCE_MEM;
		break;
	case PCI_EA_P_MEM_PREFETCH:
	case PCI_EA_P_VF_MEM_PREFETCH:
		flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH;
		break;
	case PCI_EA_P_IO:
		flags |= IORESOURCE_IO;
		break;
	default:
		return 0;
	}

	return flags;
}

static struct resource *pci_ea_get_resource(struct pci_dev *dev, u8 bei,
					    u8 prop)
{
	if (bei <= PCI_EA_BEI_BAR5 && prop <= PCI_EA_P_IO)
		return &dev->resource[bei];
#ifdef CONFIG_PCI_IOV
	else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5 &&
		 (prop == PCI_EA_P_VF_MEM || prop == PCI_EA_P_VF_MEM_PREFETCH))
		return &dev->resource[PCI_IOV_RESOURCES +
				      bei - PCI_EA_BEI_VF_BAR0];
#endif
	else if (bei == PCI_EA_BEI_ROM)
		return &dev->resource[PCI_ROM_RESOURCE];
	else
		return NULL;
}

/* Read an Enhanced Allocation (EA) entry */
static int pci_ea_read(struct pci_dev *dev, int offset)
{
	struct resource *res;
	const char *res_name;
	int ent_size, ent_offset = offset;
	resource_size_t start, end;
	unsigned long flags;
	u32 dw0, bei, base, max_offset;
	u8 prop;
	bool support_64 = (sizeof(resource_size_t) >= 8);

	pci_read_config_dword(dev, ent_offset, &dw0);
	ent_offset += 4;

	/* Entry size field indicates DWORDs after 1st */
	ent_size = (FIELD_GET(PCI_EA_ES, dw0) + 1) << 2;

	if (!(dw0 & PCI_EA_ENABLE))	/* Entry not enabled */
		goto out;

	bei = FIELD_GET(PCI_EA_BEI, dw0);
	prop = FIELD_GET(PCI_EA_PP, dw0);

	/*
	 * If the Property is in the reserved range, try the Secondary
	 * Property instead.
	 */
	if (prop > PCI_EA_P_BRIDGE_IO && prop < PCI_EA_P_MEM_RESERVED)
		prop = FIELD_GET(PCI_EA_SP, dw0);
	if (prop > PCI_EA_P_BRIDGE_IO)
		goto out;

	res = pci_ea_get_resource(dev, bei, prop);
	res_name = pci_resource_name(dev, bei);
	if (!res) {
		pci_err(dev, "Unsupported EA entry BEI: %u\n", bei);
		goto out;
	}

	flags = pci_ea_flags(dev, prop);
	if (!flags) {
		pci_err(dev, "Unsupported EA properties: %#x\n", prop);
		goto out;
	}

	/* Read Base */
	pci_read_config_dword(dev, ent_offset, &base);
	start = (base & PCI_EA_FIELD_MASK);
	ent_offset += 4;

	/* Read MaxOffset */
	pci_read_config_dword(dev, ent_offset, &max_offset);
	ent_offset += 4;

	/* Read Base MSBs (if 64-bit entry) */
	if (base & PCI_EA_IS_64) {
		u32 base_upper;

		pci_read_config_dword(dev, ent_offset, &base_upper);
		ent_offset += 4;

		flags |= IORESOURCE_MEM_64;

		/* entry starts above 32-bit boundary, can't use */
		if (!support_64 && base_upper)
			goto out;

		if (support_64)
			start |= ((u64)base_upper << 32);
	}

	end = start + (max_offset | 0x03);

	/* Read MaxOffset MSBs (if 64-bit entry) */
	if (max_offset & PCI_EA_IS_64) {
		u32 max_offset_upper;

		pci_read_config_dword(dev, ent_offset, &max_offset_upper);
		ent_offset += 4;

		flags |= IORESOURCE_MEM_64;

		/* entry too big, can't use */
		if (!support_64 && max_offset_upper)
			goto out;

		if (support_64)
			end += ((u64)max_offset_upper << 32);
	}

	if (end < start) {
		pci_err(dev, "EA Entry crosses address boundary\n");
		goto out;
	}

	if (ent_size != ent_offset - offset) {
		pci_err(dev, "EA Entry Size (%d) does not match length read (%d)\n",
			ent_size, ent_offset - offset);
		goto out;
	}

	res->name = pci_name(dev);
	res->start = start;
	res->end = end;
	res->flags = flags;

	if (bei <= PCI_EA_BEI_BAR5)
		pci_info(dev, "%s %pR: from Enhanced Allocation, properties %#02x\n",
			 res_name, res, prop);
	else if (bei == PCI_EA_BEI_ROM)
		pci_info(dev, "%s %pR: from Enhanced Allocation, properties %#02x\n",
			 res_name, res, prop);
	else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5)
		pci_info(dev, "%s %pR: from Enhanced Allocation, properties %#02x\n",
			 res_name, res, prop);
	else
		pci_info(dev, "BEI %d %pR: from Enhanced Allocation, properties %#02x\n",
			 bei, res, prop);

out:
	return offset + ent_size;
}

/* Enhanced Allocation Initialization */
void pci_ea_init(struct pci_dev *dev)
{
	int ea;
	u8 num_ent;
	int offset;
	int i;

	/* find PCI EA capability in list */
	ea = pci_find_capability(dev, PCI_CAP_ID_EA);
	if (!ea)
		return;

	/* determine the number of entries */
	pci_bus_read_config_byte(dev->bus, dev->devfn, ea + PCI_EA_NUM_ENT,
				 &num_ent);
	num_ent &= PCI_EA_NUM_ENT_MASK;

	offset = ea + PCI_EA_FIRST_ENT;

	/* Skip DWORD 2 for type 1 functions */
	if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
		offset += 4;

	/* parse each EA entry */
	for (i = 0; i < num_ent; ++i)
		offset = pci_ea_read(dev, offset);
}
3471
pci_add_saved_cap(struct pci_dev * pci_dev,struct pci_cap_saved_state * new_cap)3472 static void pci_add_saved_cap(struct pci_dev *pci_dev,
3473 struct pci_cap_saved_state *new_cap)
3474 {
3475 hlist_add_head(&new_cap->next, &pci_dev->saved_cap_space);
3476 }
3477
3478 /**
3479 * _pci_add_cap_save_buffer - allocate buffer for saving given
3480 * capability registers
3481 * @dev: the PCI device
3482 * @cap: the capability to allocate the buffer for
3483 * @extended: Standard or Extended capability ID
3484 * @size: requested size of the buffer
3485 */
_pci_add_cap_save_buffer(struct pci_dev * dev,u16 cap,bool extended,unsigned int size)3486 static int _pci_add_cap_save_buffer(struct pci_dev *dev, u16 cap,
3487 bool extended, unsigned int size)
3488 {
3489 int pos;
3490 struct pci_cap_saved_state *save_state;
3491
3492 if (extended)
3493 pos = pci_find_ext_capability(dev, cap);
3494 else
3495 pos = pci_find_capability(dev, cap);
3496
3497 if (!pos)
3498 return 0;
3499
3500 save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL);
3501 if (!save_state)
3502 return -ENOMEM;
3503
3504 save_state->cap.cap_nr = cap;
3505 save_state->cap.cap_extended = extended;
3506 save_state->cap.size = size;
3507 pci_add_saved_cap(dev, save_state);
3508
3509 return 0;
3510 }
3511
pci_add_cap_save_buffer(struct pci_dev * dev,char cap,unsigned int size)3512 int pci_add_cap_save_buffer(struct pci_dev *dev, char cap, unsigned int size)
3513 {
3514 return _pci_add_cap_save_buffer(dev, cap, false, size);
3515 }
3516
pci_add_ext_cap_save_buffer(struct pci_dev * dev,u16 cap,unsigned int size)3517 int pci_add_ext_cap_save_buffer(struct pci_dev *dev, u16 cap, unsigned int size)
3518 {
3519 return _pci_add_cap_save_buffer(dev, cap, true, size);
3520 }
3521
3522 /**
3523 * pci_allocate_cap_save_buffers - allocate buffers for saving capabilities
3524 * @dev: the PCI device
3525 */
pci_allocate_cap_save_buffers(struct pci_dev * dev)3526 void pci_allocate_cap_save_buffers(struct pci_dev *dev)
3527 {
3528 int error;
3529
3530 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP,
3531 PCI_EXP_SAVE_REGS * sizeof(u16));
3532 if (error)
3533 pci_err(dev, "unable to preallocate PCI Express save buffer\n");
3534
3535 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16));
3536 if (error)
3537 pci_err(dev, "unable to preallocate PCI-X save buffer\n");
3538
3539 error = pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_LTR,
3540 2 * sizeof(u16));
3541 if (error)
3542 pci_err(dev, "unable to allocate suspend buffer for LTR\n");
3543
3544 pci_allocate_vc_save_buffers(dev);
3545 }
3546
pci_free_cap_save_buffers(struct pci_dev * dev)3547 void pci_free_cap_save_buffers(struct pci_dev *dev)
3548 {
3549 struct pci_cap_saved_state *tmp;
3550 struct hlist_node *n;
3551
3552 hlist_for_each_entry_safe(tmp, n, &dev->saved_cap_space, next)
3553 kfree(tmp);
3554 }
3555
3556 /**
3557 * pci_configure_ari - enable or disable ARI forwarding
3558 * @dev: the PCI device
3559 *
3560 * If @dev and its upstream bridge both support ARI, enable ARI in the
3561 * bridge. Otherwise, disable ARI in the bridge.
3562 */
pci_configure_ari(struct pci_dev * dev)3563 void pci_configure_ari(struct pci_dev *dev)
3564 {
3565 u32 cap;
3566 struct pci_dev *bridge;
3567
3568 if (pcie_ari_disabled || !pci_is_pcie(dev) || dev->devfn)
3569 return;
3570
3571 bridge = dev->bus->self;
3572 if (!bridge)
3573 return;
3574
3575 pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);
3576 if (!(cap & PCI_EXP_DEVCAP2_ARI))
3577 return;
3578
3579 if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI)) {
3580 pcie_capability_set_word(bridge, PCI_EXP_DEVCTL2,
3581 PCI_EXP_DEVCTL2_ARI);
3582 bridge->ari_enabled = 1;
3583 } else {
3584 pcie_capability_clear_word(bridge, PCI_EXP_DEVCTL2,
3585 PCI_EXP_DEVCTL2_ARI);
3586 bridge->ari_enabled = 0;
3587 }
3588 }
3589
3590 static bool pci_acs_flags_enabled(struct pci_dev *pdev, u16 acs_flags)
3591 {
3592 int pos;
3593 u16 cap, ctrl;
3594
3595 pos = pdev->acs_cap;
3596 if (!pos)
3597 return false;
3598
3599 /*
3600 * Except for egress control, capabilities are either required
3601 * or only required if controllable. Features missing from the
3602 * capability field can therefore be assumed as hard-wired enabled.
3603 */
3604 pci_read_config_word(pdev, pos + PCI_ACS_CAP, &cap);
3605 acs_flags &= (cap | PCI_ACS_EC);
3606
3607 pci_read_config_word(pdev, pos + PCI_ACS_CTRL, &ctrl);
3608 return (ctrl & acs_flags) == acs_flags;
3609 }
3610
3611 /**
3612 * pci_acs_enabled - test ACS against required flags for a given device
3613 * @pdev: device to test
3614 * @acs_flags: required PCI ACS flags
3615 *
3616 * Return true if the device supports the provided flags. Automatically
3617 * filters out flags that are not implemented on multifunction devices.
3618 *
3619 * Note that this interface checks the effective ACS capabilities of the
3620 * device rather than the actual capabilities. For instance, most single
3621 * function endpoints are not required to support ACS because they have no
3622 * opportunity for peer-to-peer access. We therefore return 'true'
3623 * regardless of whether the device exposes an ACS capability. This makes
3624 * it much easier for callers of this function to ignore the actual type
3625 * or topology of the device when testing ACS support.
3626 */
3627 bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags)
3628 {
3629 int ret;
3630
3631 ret = pci_dev_specific_acs_enabled(pdev, acs_flags);
3632 if (ret >= 0)
3633 return ret > 0;
3634
3635 /*
3636 * Conventional PCI and PCI-X devices never support ACS, either
3637 * effectively or actually. The shared bus topology implies that
3638 * any device on the bus can receive or snoop DMA.
3639 */
3640 if (!pci_is_pcie(pdev))
3641 return false;
3642
3643 switch (pci_pcie_type(pdev)) {
3644 /*
3645 * PCI/X-to-PCIe bridges are not specifically mentioned by the spec,
3646 * but since their primary interface is PCI/X, we conservatively
3647 * handle them as we would a non-PCIe device.
3648 */
3649 case PCI_EXP_TYPE_PCIE_BRIDGE:
3650 /*
3651 * PCIe 3.0, 6.12.1 excludes ACS on these devices. "ACS is never
3652 * applicable... must never implement an ACS Extended Capability...".
3653 * This seems arbitrary, but we take a conservative interpretation
3654 * of this statement.
3655 */
3656 case PCI_EXP_TYPE_PCI_BRIDGE:
3657 case PCI_EXP_TYPE_RC_EC:
3658 return false;
3659 /*
3660 * PCIe 3.0, 6.12.1.1 specifies that downstream and root ports should
3661 * implement ACS in order to indicate their peer-to-peer capabilities,
3662 * regardless of whether they are single- or multi-function devices.
3663 */
3664 case PCI_EXP_TYPE_DOWNSTREAM:
3665 case PCI_EXP_TYPE_ROOT_PORT:
3666 return pci_acs_flags_enabled(pdev, acs_flags);
3667 /*
3668 * PCIe 3.0, 6.12.1.2 specifies ACS capabilities that should be
3669 * implemented by the remaining PCIe types to indicate peer-to-peer
3670 * capabilities, but only when they are part of a multifunction
3671 * device. The footnote for section 6.12 indicates the specific
3672 * PCIe types included here.
3673 */
3674 case PCI_EXP_TYPE_ENDPOINT:
3675 case PCI_EXP_TYPE_UPSTREAM:
3676 case PCI_EXP_TYPE_LEG_END:
3677 case PCI_EXP_TYPE_RC_END:
3678 if (!pdev->multifunction)
3679 break;
3680
3681 return pci_acs_flags_enabled(pdev, acs_flags);
3682 }
3683
3684 /*
3685 * PCIe 3.0, 6.12.1.3 specifies no ACS capabilities are applicable
3686 * to single function devices with the exception of downstream ports.
3687 */
3688 return true;
3689 }
3690
3691 /**
3692 * pci_acs_path_enabled - test ACS flags from start to end in a hierarchy
3693 * @start: starting downstream device
3694 * @end: ending upstream device or NULL to search to the root bus
3695 * @acs_flags: required flags
3696 *
3697 * Walk up a device tree from start to end testing PCI ACS support. If
3698 * any step along the way does not support the required flags, return false.
3699 */
3700 bool pci_acs_path_enabled(struct pci_dev *start,
3701 struct pci_dev *end, u16 acs_flags)
3702 {
3703 struct pci_dev *pdev, *parent = start;
3704
3705 do {
3706 pdev = parent;
3707
3708 if (!pci_acs_enabled(pdev, acs_flags))
3709 return false;
3710
3711 if (pci_is_root_bus(pdev->bus))
3712 return (end == NULL);
3713
3714 parent = pdev->bus->self;
3715 } while (pdev != end);
3716
3717 return true;
3718 }
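
/*
 * Illustrative sketch (not part of this file): a hypothetical caller
 * verifying that every bridge between an endpoint and the root performs
 * the ACS isolation needed for safe peer-to-peer DMA, i.e. that requests
 * are redirected upstream where the IOMMU can translate them. The "foo"
 * name is hypothetical.
 */
static bool foo_path_is_isolated(struct pci_dev *pdev)
{
	u16 flags = PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF;

	return pci_acs_path_enabled(pdev, NULL, flags);
}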
3719
3720 /**
3721 * pci_acs_init - Initialize ACS if hardware supports it
3722 * @dev: the PCI device
3723 */
3724 void pci_acs_init(struct pci_dev *dev)
3725 {
3726 dev->acs_cap = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
3727
3728 /*
3729 * Attempt to enable ACS regardless of capability because some Root
3730 * Ports (e.g. those quirked with *_intel_pch_acs_*) do not have
3731 * the standard ACS capability but still support ACS via those
3732 * quirks.
3733 */
3734 pci_enable_acs(dev);
3735 }
3736
3737 void pci_rebar_init(struct pci_dev *pdev)
3738 {
3739 pdev->rebar_cap = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_REBAR);
3740 }
3741
3742 /**
3743 * pci_rebar_find_pos - find position of resize ctrl reg for BAR
3744 * @pdev: PCI device
3745 * @bar: BAR to find
3746 *
3747 * Helper to find the position of the ctrl register for a BAR.
3748 * Returns -ENOTSUPP if resizable BARs are not supported at all.
3749 * Returns -ENOENT if no ctrl register for the BAR could be found.
3750 */
3751 static int pci_rebar_find_pos(struct pci_dev *pdev, int bar)
3752 {
3753 unsigned int pos, nbars, i;
3754 u32 ctrl;
3755
3756 pos = pdev->rebar_cap;
3757 if (!pos)
3758 return -ENOTSUPP;
3759
3760 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3761 nbars = FIELD_GET(PCI_REBAR_CTRL_NBAR_MASK, ctrl);
3762
3763 for (i = 0; i < nbars; i++, pos += 8) {
3764 int bar_idx;
3765
3766 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3767 bar_idx = FIELD_GET(PCI_REBAR_CTRL_BAR_IDX, ctrl);
3768 if (bar_idx == bar)
3769 return pos;
3770 }
3771
3772 return -ENOENT;
3773 }
3774
3775 /**
3776 * pci_rebar_get_possible_sizes - get possible sizes for BAR
3777 * @pdev: PCI device
3778 * @bar: BAR to query
3779 *
3780 * Get the possible sizes of a resizable BAR as bitmask defined in the spec
3781 * (bit 0=1MB, bit 31=128TB). Returns 0 if BAR isn't resizable.
3782 */
3783 u32 pci_rebar_get_possible_sizes(struct pci_dev *pdev, int bar)
3784 {
3785 int pos;
3786 u32 cap;
3787
3788 pos = pci_rebar_find_pos(pdev, bar);
3789 if (pos < 0)
3790 return 0;
3791
3792 pci_read_config_dword(pdev, pos + PCI_REBAR_CAP, &cap);
3793 cap = FIELD_GET(PCI_REBAR_CAP_SIZES, cap);
3794
3795 /* Sapphire RX 5600 XT Pulse has an invalid cap dword for BAR 0 */
3796 if (pdev->vendor == PCI_VENDOR_ID_ATI && pdev->device == 0x731f &&
3797 bar == 0 && cap == 0x700)
3798 return 0x3f00;
3799
3800 return cap;
3801 }
3802 EXPORT_SYMBOL(pci_rebar_get_possible_sizes);
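
/*
 * Illustrative sketch: selecting the largest BAR size a device
 * advertises. The bitmask uses the spec encoding noted above, so the
 * highest set bit maps back to a size value via fls(). The "foo" name
 * is hypothetical.
 */
static int foo_largest_rebar_size(struct pci_dev *pdev, int bar)
{
	u32 sizes = pci_rebar_get_possible_sizes(pdev, bar);

	if (!sizes)
		return -ENOTSUPP;	/* BAR is not resizable */

	return fls(sizes) - 1;		/* e.g. 8 => 256MB */
}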
3803
3804 /**
3805 * pci_rebar_get_current_size - get the current size of a BAR
3806 * @pdev: PCI device
3807 * @bar: BAR to query
3808 *
3809 * Read the size of a BAR from the resizable BAR config.
3810 * Returns size if found or negative error code.
3811 */
3812 int pci_rebar_get_current_size(struct pci_dev *pdev, int bar)
3813 {
3814 int pos;
3815 u32 ctrl;
3816
3817 pos = pci_rebar_find_pos(pdev, bar);
3818 if (pos < 0)
3819 return pos;
3820
3821 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3822 return FIELD_GET(PCI_REBAR_CTRL_BAR_SIZE, ctrl);
3823 }
3824
3825 /**
3826 * pci_rebar_set_size - set a new size for a BAR
3827 * @pdev: PCI device
3828 * @bar: BAR to set size to
3829 * @size: new size as defined in the spec (0=1MB, 31=128TB)
3830 *
3831 * Set the new size of a BAR as defined in the spec.
3832 * Returns zero if resizing was successful, error code otherwise.
3833 */
3834 int pci_rebar_set_size(struct pci_dev *pdev, int bar, int size)
3835 {
3836 int pos;
3837 u32 ctrl;
3838
3839 pos = pci_rebar_find_pos(pdev, bar);
3840 if (pos < 0)
3841 return pos;
3842
3843 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3844 ctrl &= ~PCI_REBAR_CTRL_BAR_SIZE;
3845 ctrl |= FIELD_PREP(PCI_REBAR_CTRL_BAR_SIZE, size);
3846 pci_write_config_dword(pdev, pos + PCI_REBAR_CTRL, ctrl);
3847 return 0;
3848 }
3849
3850 /**
3851 * pci_enable_atomic_ops_to_root - enable AtomicOp requests to root port
3852 * @dev: the PCI device
3853 * @cap_mask: mask of desired AtomicOp sizes, including one or more of:
3854 * PCI_EXP_DEVCAP2_ATOMIC_COMP32
3855 * PCI_EXP_DEVCAP2_ATOMIC_COMP64
3856 * PCI_EXP_DEVCAP2_ATOMIC_COMP128
3857 *
3858 * Return 0 if all upstream bridges support AtomicOp routing, egress
3859 * blocking is disabled on all upstream ports, and the root port supports
3860 * the requested completion capabilities (32-bit, 64-bit and/or 128-bit
3861 * AtomicOp completion), or negative otherwise.
3862 */
3863 int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask)
3864 {
3865 struct pci_bus *bus = dev->bus;
3866 struct pci_dev *bridge;
3867 u32 cap, ctl2;
3868
3869 /*
3870 * Per PCIe r5.0, sec 9.3.5.10, the AtomicOp Requester Enable bit
3871 * in Device Control 2 is reserved in VFs and the PF value applies
3872 * to all associated VFs.
3873 */
3874 if (dev->is_virtfn)
3875 return -EINVAL;
3876
3877 if (!pci_is_pcie(dev))
3878 return -EINVAL;
3879
3880 /*
3881 * Per PCIe r4.0, sec 6.15, endpoints and root ports may be
3882 * AtomicOp requesters. For now, we only support endpoints as
3883 * requesters and root ports as completers. No endpoints as
3884 * completers, and no peer-to-peer.
3885 */
3886
3887 switch (pci_pcie_type(dev)) {
3888 case PCI_EXP_TYPE_ENDPOINT:
3889 case PCI_EXP_TYPE_LEG_END:
3890 case PCI_EXP_TYPE_RC_END:
3891 break;
3892 default:
3893 return -EINVAL;
3894 }
3895
3896 while (bus->parent) {
3897 bridge = bus->self;
3898
3899 pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);
3900
3901 switch (pci_pcie_type(bridge)) {
3902 /* Ensure switch ports support AtomicOp routing */
3903 case PCI_EXP_TYPE_UPSTREAM:
3904 case PCI_EXP_TYPE_DOWNSTREAM:
3905 if (!(cap & PCI_EXP_DEVCAP2_ATOMIC_ROUTE))
3906 return -EINVAL;
3907 break;
3908
3909 /* Ensure root port supports all the sizes we care about */
3910 case PCI_EXP_TYPE_ROOT_PORT:
3911 if ((cap & cap_mask) != cap_mask)
3912 return -EINVAL;
3913 break;
3914 }
3915
3916 /* Ensure upstream ports don't block AtomicOps on egress */
3917 if (pci_pcie_type(bridge) == PCI_EXP_TYPE_UPSTREAM) {
3918 pcie_capability_read_dword(bridge, PCI_EXP_DEVCTL2,
3919 &ctl2);
3920 if (ctl2 & PCI_EXP_DEVCTL2_ATOMIC_EGRESS_BLOCK)
3921 return -EINVAL;
3922 }
3923
3924 bus = bus->parent;
3925 }
3926
3927 pcie_capability_set_word(dev, PCI_EXP_DEVCTL2,
3928 PCI_EXP_DEVCTL2_ATOMIC_REQ);
3929 return 0;
3930 }
3931 EXPORT_SYMBOL(pci_enable_atomic_ops_to_root);
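
/*
 * Illustrative sketch: an endpoint driver that requires 64-bit AtomicOp
 * completions at the Root Port would typically do this once during
 * probe and fall back to a non-atomic path on failure. The "foo" name
 * is hypothetical.
 */
static int foo_setup_atomics(struct pci_dev *pdev)
{
	int rc = pci_enable_atomic_ops_to_root(pdev,
					       PCI_EXP_DEVCAP2_ATOMIC_COMP64);
	if (rc)
		pci_info(pdev, "AtomicOps not routed to root; using fallback\n");

	return rc;
}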
3932
3933 /**
3934 * pci_release_region - Release a PCI BAR
3935 * @pdev: PCI device whose resources were previously reserved by
3936 * pci_request_region()
3937 * @bar: BAR to release
3938 *
3939 * Releases the PCI I/O and memory resources previously reserved by a
3940 * successful call to pci_request_region(). Call this function only
3941 * after all use of the PCI regions has ceased.
3942 */
3943 void pci_release_region(struct pci_dev *pdev, int bar)
3944 {
3945 if (!pci_bar_index_is_valid(bar))
3946 return;
3947
3948 if (pci_resource_len(pdev, bar) == 0)
3949 return;
3950 if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
3951 release_region(pci_resource_start(pdev, bar),
3952 pci_resource_len(pdev, bar));
3953 else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
3954 release_mem_region(pci_resource_start(pdev, bar),
3955 pci_resource_len(pdev, bar));
3956 }
3957 EXPORT_SYMBOL(pci_release_region);
3958
3959 /**
3960 * __pci_request_region - Reserve PCI I/O and memory resource
3961 * @pdev: PCI device whose resources are to be reserved
3962 * @bar: BAR to be reserved
3963 * @name: name of the driver requesting the resource
3964 * @exclusive: whether the region access is exclusive or not
3965 *
3966 * Returns: 0 on success, negative error code on failure.
3967 *
3968 * Mark the PCI region associated with PCI device @pdev BAR @bar as being
3969 * reserved by owner @name. Do not access any address inside the PCI regions
3970 * unless this call returns successfully.
3971 *
3972 * If @exclusive is set, then the region is marked so that userspace
3973 * is explicitly not allowed to map the resource via /dev/mem or
3974 * sysfs MMIO access.
3975 *
3976 * Returns 0 on success, or -EBUSY on error. A warning
3977 * message is also printed on failure.
3978 */
3979 static int __pci_request_region(struct pci_dev *pdev, int bar,
3980 const char *name, int exclusive)
3981 {
3982 if (!pci_bar_index_is_valid(bar))
3983 return -EINVAL;
3984
3985 if (pci_resource_len(pdev, bar) == 0)
3986 return 0;
3987
3988 if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
3989 if (!request_region(pci_resource_start(pdev, bar),
3990 pci_resource_len(pdev, bar), name))
3991 goto err_out;
3992 } else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
3993 if (!__request_mem_region(pci_resource_start(pdev, bar),
3994 pci_resource_len(pdev, bar), name,
3995 exclusive))
3996 goto err_out;
3997 }
3998
3999 return 0;
4000
4001 err_out:
4002 pci_warn(pdev, "BAR %d: can't reserve %pR\n", bar,
4003 &pdev->resource[bar]);
4004 return -EBUSY;
4005 }
4006
4007 /**
4008 * pci_request_region - Reserve PCI I/O and memory resource
4009 * @pdev: PCI device whose resources are to be reserved
4010 * @bar: BAR to be reserved
4011 * @name: name of the driver requesting the resource
4012 *
4013 * Returns: 0 on success, negative error code on failure.
4014 *
4015 * Mark the PCI region associated with PCI device @pdev BAR @bar as being
4016 * reserved by owner @name. Do not access any address inside the PCI regions
4017 * unless this call returns successfully.
4018 *
4019 * Returns 0 on success, or -EBUSY on error. A warning
4020 * message is also printed on failure.
4021 */
4022 int pci_request_region(struct pci_dev *pdev, int bar, const char *name)
4023 {
4024 return __pci_request_region(pdev, bar, name, 0);
4025 }
4026 EXPORT_SYMBOL(pci_request_region);
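
/*
 * Illustrative sketch: reserving and mapping a single MMIO BAR during
 * probe. A pci_iomap() length of 0 maps the whole BAR. "foo" is a
 * hypothetical driver name.
 */
static void __iomem *foo_map_bar(struct pci_dev *pdev, int bar)
{
	void __iomem *regs;

	if (pci_request_region(pdev, bar, "foo"))
		return NULL;

	regs = pci_iomap(pdev, bar, 0);
	if (!regs)
		pci_release_region(pdev, bar);

	return regs;
}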
4027
4028 /**
4029 * pci_release_selected_regions - Release selected PCI I/O and memory resources
4030 * @pdev: PCI device whose resources were previously reserved
4031 * @bars: Bitmask of BARs to be released
4032 *
4033 * Release selected PCI I/O and memory resources previously reserved.
4034 * Call this function only after all use of the PCI regions has ceased.
4035 */
4036 void pci_release_selected_regions(struct pci_dev *pdev, int bars)
4037 {
4038 int i;
4039
4040 for (i = 0; i < PCI_STD_NUM_BARS; i++)
4041 if (bars & (1 << i))
4042 pci_release_region(pdev, i);
4043 }
4044 EXPORT_SYMBOL(pci_release_selected_regions);
4045
4046 static int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
4047 const char *name, int excl)
4048 {
4049 int i;
4050
4051 for (i = 0; i < PCI_STD_NUM_BARS; i++)
4052 if (bars & (1 << i))
4053 if (__pci_request_region(pdev, i, name, excl))
4054 goto err_out;
4055 return 0;
4056
4057 err_out:
4058 while (--i >= 0)
4059 if (bars & (1 << i))
4060 pci_release_region(pdev, i);
4061
4062 return -EBUSY;
4063 }
4064
4066 /**
4067 * pci_request_selected_regions - Reserve selected PCI I/O and memory resources
4068 * @pdev: PCI device whose resources are to be reserved
4069 * @bars: Bitmask of BARs to be requested
4070 * @name: Name of the driver requesting the resources
4071 *
4072 * Returns: 0 on success, negative error code on failure.
4073 */
4074 int pci_request_selected_regions(struct pci_dev *pdev, int bars,
4075 const char *name)
4076 {
4077 return __pci_request_selected_regions(pdev, bars, name, 0);
4078 }
4079 EXPORT_SYMBOL(pci_request_selected_regions);
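
/*
 * Illustrative sketch: reserving only the memory BARs of a device,
 * using pci_select_bars() to build the bitmask from the resource
 * flags. The "foo" name is hypothetical.
 */
static int foo_request_mem_bars(struct pci_dev *pdev)
{
	int bars = pci_select_bars(pdev, IORESOURCE_MEM);

	return pci_request_selected_regions(pdev, bars, "foo");
}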
4080
4081 /**
4082 * pci_request_selected_regions_exclusive - Request regions exclusively
4083 * @pdev: PCI device to request regions from
4084 * @bars: bit mask of BARs to request
4085 * @name: name of the driver requesting the resources
4086 *
4087 * Returns: 0 on success, negative error code on failure.
4088 */
4089 int pci_request_selected_regions_exclusive(struct pci_dev *pdev, int bars,
4090 const char *name)
4091 {
4092 return __pci_request_selected_regions(pdev, bars, name,
4093 IORESOURCE_EXCLUSIVE);
4094 }
4095 EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
4096
4097 /**
4098 * pci_release_regions - Release reserved PCI I/O and memory resources
4099 * @pdev: PCI device whose resources were previously reserved by
4100 * pci_request_regions()
4101 *
4102 * Releases all PCI I/O and memory resources previously reserved by a
4103 * successful call to pci_request_regions(). Call this function only
4104 * after all use of the PCI regions has ceased.
4105 */
4106 void pci_release_regions(struct pci_dev *pdev)
4107 {
4108 pci_release_selected_regions(pdev, (1 << PCI_STD_NUM_BARS) - 1);
4109 }
4110 EXPORT_SYMBOL(pci_release_regions);
4111
4112 /**
4113 * pci_request_regions - Reserve PCI I/O and memory resources
4114 * @pdev: PCI device whose resources are to be reserved
4115 * @name: name of the driver requesting the resources
4116 *
4117 * Mark all PCI regions associated with PCI device @pdev as being reserved by
4118 * owner @name. Do not access any address inside the PCI regions unless this
4119 * call returns successfully.
4120 *
4121 * Returns 0 on success, or -EBUSY on error. A warning
4122 * message is also printed on failure.
4123 */
4124 int pci_request_regions(struct pci_dev *pdev, const char *name)
4125 {
4126 return pci_request_selected_regions(pdev,
4127 ((1 << PCI_STD_NUM_BARS) - 1), name);
4128 }
4129 EXPORT_SYMBOL(pci_request_regions);
4130
4131 /**
4132 * pci_request_regions_exclusive - Reserve PCI I/O and memory resources
4133 * @pdev: PCI device whose resources are to be reserved
4134 * @name: name of the driver requesting the resources
4135 *
4136 * Returns: 0 on success, negative error code on failure.
4137 *
4138 * Mark all PCI regions associated with PCI device @pdev as being reserved
4139 * by owner @name. Do not access any address inside the PCI regions
4140 * unless this call returns successfully.
4141 *
4142 * pci_request_regions_exclusive() will mark the region so that /dev/mem
4143 * and the sysfs MMIO access will not be allowed.
4144 *
4145 * Returns 0 on success, or -EBUSY on error. A warning message is also
4146 * printed on failure.
4147 */
4148 int pci_request_regions_exclusive(struct pci_dev *pdev, const char *name)
4149 {
4150 return pci_request_selected_regions_exclusive(pdev,
4151 ((1 << PCI_STD_NUM_BARS) - 1), name);
4152 }
4153 EXPORT_SYMBOL(pci_request_regions_exclusive);
4154
4155 /*
4156 * Record the PCI IO range (expressed as CPU physical address + size).
4157 * Return a negative value if an error has occurred, zero otherwise
4158 */
4159 int pci_register_io_range(const struct fwnode_handle *fwnode, phys_addr_t addr,
4160 resource_size_t size)
4161 {
4162 int ret = 0;
4163 #ifdef PCI_IOBASE
4164 struct logic_pio_hwaddr *range;
4165
4166 if (!size || addr + size < addr)
4167 return -EINVAL;
4168
4169 range = kzalloc(sizeof(*range), GFP_ATOMIC);
4170 if (!range)
4171 return -ENOMEM;
4172
4173 range->fwnode = fwnode;
4174 range->size = size;
4175 range->hw_start = addr;
4176 range->flags = LOGIC_PIO_CPU_MMIO;
4177
4178 ret = logic_pio_register_range(range);
4179 if (ret)
4180 kfree(range);
4181
4182 /* Ignore duplicates due to deferred probing */
4183 if (ret == -EEXIST)
4184 ret = 0;
4185 #endif
4186
4187 return ret;
4188 }
4189
4190 phys_addr_t pci_pio_to_address(unsigned long pio)
4191 {
4192 #ifdef PCI_IOBASE
4193 if (pio < MMIO_UPPER_LIMIT)
4194 return logic_pio_to_hwaddr(pio);
4195 #endif
4196
4197 return (phys_addr_t) OF_BAD_ADDR;
4198 }
4199 EXPORT_SYMBOL_GPL(pci_pio_to_address);
4200
4201 unsigned long __weak pci_address_to_pio(phys_addr_t address)
4202 {
4203 #ifdef PCI_IOBASE
4204 return logic_pio_trans_cpuaddr(address);
4205 #else
4206 if (address > IO_SPACE_LIMIT)
4207 return (unsigned long)-1;
4208
4209 return (unsigned long) address;
4210 #endif
4211 }
4212
4213 /**
4214 * pci_remap_iospace - Remap the memory mapped I/O space
4215 * @res: Resource describing the I/O space
4216 * @phys_addr: physical address of range to be mapped
4217 *
4218 * Remap the memory mapped I/O space described by the @res and the CPU
4219 * physical address @phys_addr into virtual address space. Only
4220 * architectures that have memory mapped IO functions defined (and the
4221 * PCI_IOBASE value defined) should call this function.
4222 */
4223 #ifndef pci_remap_iospace
4224 int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
4225 {
4226 #if defined(PCI_IOBASE)
4227 unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
4228
4229 if (!(res->flags & IORESOURCE_IO))
4230 return -EINVAL;
4231
4232 if (res->end > IO_SPACE_LIMIT)
4233 return -EINVAL;
4234
4235 return vmap_page_range(vaddr, vaddr + resource_size(res), phys_addr,
4236 pgprot_device(PAGE_KERNEL));
4237 #else
4238 /*
4239 * This architecture does not have memory mapped I/O space,
4240 * so this function should never be called
4241 */
4242 WARN_ONCE(1, "This architecture does not support memory mapped I/O\n");
4243 return -ENODEV;
4244 #endif
4245 }
4246 EXPORT_SYMBOL(pci_remap_iospace);
4247 #endif
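
/*
 * Illustrative sketch: a host-bridge driver mapping the I/O window it
 * parsed from firmware (e.g. a DT "ranges" entry). The resource and
 * CPU address here are hypothetical inputs, as is the "foo" name.
 */
static int foo_map_io_window(struct device *dev, struct resource *io,
			     phys_addr_t cpu_addr)
{
	int rc = pci_remap_iospace(io, cpu_addr);

	if (rc)
		dev_err(dev, "failed to map I/O window %pR: %d\n", io, rc);

	return rc;
}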
4248
4249 /**
4250 * pci_unmap_iospace - Unmap the memory mapped I/O space
4251 * @res: resource to be unmapped
4252 *
4253 * Unmap the I/O range described by @res from virtual address space. Only
4254 * architectures that have memory mapped IO functions defined (and the
4255 * PCI_IOBASE value defined) should call this function.
4256 */
4257 void pci_unmap_iospace(struct resource *res)
4258 {
4259 #if defined(PCI_IOBASE)
4260 unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
4261
4262 vunmap_range(vaddr, vaddr + resource_size(res));
4263 #endif
4264 }
4265 EXPORT_SYMBOL(pci_unmap_iospace);
4266
4267 static void __pci_set_master(struct pci_dev *dev, bool enable)
4268 {
4269 u16 old_cmd, cmd;
4270
4271 pci_read_config_word(dev, PCI_COMMAND, &old_cmd);
4272 if (enable)
4273 cmd = old_cmd | PCI_COMMAND_MASTER;
4274 else
4275 cmd = old_cmd & ~PCI_COMMAND_MASTER;
4276 if (cmd != old_cmd) {
4277 pci_dbg(dev, "%s bus mastering\n",
4278 enable ? "enabling" : "disabling");
4279 pci_write_config_word(dev, PCI_COMMAND, cmd);
4280 }
4281 dev->is_busmaster = enable;
4282 }
4283
4284 /**
4285 * pcibios_setup - process "pci=" kernel boot arguments
4286 * @str: string used to pass in "pci=" kernel boot arguments
4287 *
4288 * Process kernel boot arguments. This is the default implementation.
4289 * Architecture specific implementations can override this as necessary.
4290 */
4291 char * __weak __init pcibios_setup(char *str)
4292 {
4293 return str;
4294 }
4295
4296 /**
4297 * pcibios_set_master - enable PCI bus-mastering for device dev
4298 * @dev: the PCI device to enable
4299 *
4300 * Enables PCI bus-mastering for the device. This is the default
4301 * implementation. Architecture specific implementations can override
4302 * this if necessary.
4303 */
4304 void __weak pcibios_set_master(struct pci_dev *dev)
4305 {
4306 u8 lat;
4307
4308 /* The latency timer doesn't apply to PCIe (either Type 0 or Type 1) */
4309 if (pci_is_pcie(dev))
4310 return;
4311
4312 pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
4313 if (lat < 16)
4314 lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency;
4315 else if (lat > pcibios_max_latency)
4316 lat = pcibios_max_latency;
4317 else
4318 return;
4319
4320 pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
4321 }
4322
4323 /**
4324 * pci_set_master - enables bus-mastering for device dev
4325 * @dev: the PCI device to enable
4326 *
4327 * Enables bus-mastering on the device and calls pcibios_set_master()
4328 * to do the needed arch specific settings.
4329 */
4330 void pci_set_master(struct pci_dev *dev)
4331 {
4332 __pci_set_master(dev, true);
4333 pcibios_set_master(dev);
4334 }
4335 EXPORT_SYMBOL(pci_set_master);
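
/*
 * Illustrative sketch: the typical probe-time ordering, enabling the
 * device (I/O and memory decode) before turning on bus mastering.
 * Error handling is trimmed; the "foo" name is hypothetical.
 */
static int foo_probe(struct pci_dev *pdev)
{
	int rc = pci_enable_device(pdev);

	if (rc)
		return rc;

	pci_set_master(pdev);	/* required before the device may DMA */
	return 0;
}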
4336
4337 /**
4338 * pci_clear_master - disables bus-mastering for device dev
4339 * @dev: the PCI device to disable
4340 */
4341 void pci_clear_master(struct pci_dev *dev)
4342 {
4343 __pci_set_master(dev, false);
4344 }
4345 EXPORT_SYMBOL(pci_clear_master);
4346
4347 /**
4348 * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed
4349 * @dev: the PCI device for which MWI is to be enabled
4350 *
4351 * Helper function for pci_set_mwi.
4352 * Originally copied from drivers/net/acenic.c.
4353 * Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>.
4354 *
4355 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
4356 */
4357 int pci_set_cacheline_size(struct pci_dev *dev)
4358 {
4359 u8 cacheline_size;
4360
4361 if (!pci_cache_line_size)
4362 return -EINVAL;
4363
4364 /* Validate current setting: the PCI_CACHE_LINE_SIZE must be
4365 equal to or multiple of the right value. */
4366 pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
4367 if (cacheline_size >= pci_cache_line_size &&
4368 (cacheline_size % pci_cache_line_size) == 0)
4369 return 0;
4370
4371 /* Write the correct value. */
4372 pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
4373 /* Read it back. */
4374 pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
4375 if (cacheline_size == pci_cache_line_size)
4376 return 0;
4377
4378 pci_dbg(dev, "cache line size of %d is not supported\n",
4379 pci_cache_line_size << 2);
4380
4381 return -EINVAL;
4382 }
4383 EXPORT_SYMBOL_GPL(pci_set_cacheline_size);
4384
4385 /**
4386 * pci_set_mwi - enables memory-write-invalidate PCI transaction
4387 * @dev: the PCI device for which MWI is enabled
4388 *
4389 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
4390 *
4391 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
4392 */
4393 int pci_set_mwi(struct pci_dev *dev)
4394 {
4395 #ifdef PCI_DISABLE_MWI
4396 return 0;
4397 #else
4398 int rc;
4399 u16 cmd;
4400
4401 rc = pci_set_cacheline_size(dev);
4402 if (rc)
4403 return rc;
4404
4405 pci_read_config_word(dev, PCI_COMMAND, &cmd);
4406 if (!(cmd & PCI_COMMAND_INVALIDATE)) {
4407 pci_dbg(dev, "enabling Mem-Wr-Inval\n");
4408 cmd |= PCI_COMMAND_INVALIDATE;
4409 pci_write_config_word(dev, PCI_COMMAND, cmd);
4410 }
4411 return 0;
4412 #endif
4413 }
4414 EXPORT_SYMBOL(pci_set_mwi);
4415
4416 /**
4417 * pci_try_set_mwi - enables memory-write-invalidate PCI transaction
4418 * @dev: the PCI device for which MWI is enabled
4419 *
4420 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
4421 * Callers are not required to check the return value.
4422 *
4423 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
4424 */
4425 int pci_try_set_mwi(struct pci_dev *dev)
4426 {
4427 #ifdef PCI_DISABLE_MWI
4428 return 0;
4429 #else
4430 return pci_set_mwi(dev);
4431 #endif
4432 }
4433 EXPORT_SYMBOL(pci_try_set_mwi);
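
/*
 * Illustrative sketch: MWI is a pure optimization on conventional PCI,
 * so drivers typically use the try variant and ignore the result. The
 * "foo" name is hypothetical.
 */
static void foo_optimize(struct pci_dev *pdev)
{
	pci_try_set_mwi(pdev);	/* best effort; harmless if unsupported */
}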
4434
4435 /**
4436 * pci_clear_mwi - disables Memory-Write-Invalidate for device dev
4437 * @dev: the PCI device to disable
4438 *
4439 * Disables PCI Memory-Write-Invalidate transaction on the device
4440 */
4441 void pci_clear_mwi(struct pci_dev *dev)
4442 {
4443 #ifndef PCI_DISABLE_MWI
4444 u16 cmd;
4445
4446 pci_read_config_word(dev, PCI_COMMAND, &cmd);
4447 if (cmd & PCI_COMMAND_INVALIDATE) {
4448 cmd &= ~PCI_COMMAND_INVALIDATE;
4449 pci_write_config_word(dev, PCI_COMMAND, cmd);
4450 }
4451 #endif
4452 }
4453 EXPORT_SYMBOL(pci_clear_mwi);
4454
4455 /**
4456 * pci_disable_parity - disable parity checking for device
4457 * @dev: the PCI device to operate on
4458 *
4459 * Disable parity checking for device @dev
4460 */
4461 void pci_disable_parity(struct pci_dev *dev)
4462 {
4463 u16 cmd;
4464
4465 pci_read_config_word(dev, PCI_COMMAND, &cmd);
4466 if (cmd & PCI_COMMAND_PARITY) {
4467 cmd &= ~PCI_COMMAND_PARITY;
4468 pci_write_config_word(dev, PCI_COMMAND, cmd);
4469 }
4470 }
4471
4472 /**
4473 * pci_intx - enables/disables PCI INTx for device dev
4474 * @pdev: the PCI device to operate on
4475 * @enable: boolean: whether to enable or disable PCI INTx
4476 *
4477 * Enables/disables PCI INTx for device @pdev
4478 */
4479 void pci_intx(struct pci_dev *pdev, int enable)
4480 {
4481 u16 pci_command, new;
4482
4483 pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
4484
4485 if (enable)
4486 new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
4487 else
4488 new = pci_command | PCI_COMMAND_INTX_DISABLE;
4489
4490 if (new == pci_command)
4491 return;
4492
4493 pci_write_config_word(pdev, PCI_COMMAND, new);
4494 }
4495 EXPORT_SYMBOL_GPL(pci_intx);
4496
4497 /**
4498 * pci_wait_for_pending_transaction - wait for pending transaction
4499 * @dev: the PCI device to operate on
4500 *
4501 * Return 0 if a transaction is still pending, 1 otherwise.
4502 */
4503 int pci_wait_for_pending_transaction(struct pci_dev *dev)
4504 {
4505 if (!pci_is_pcie(dev))
4506 return 1;
4507
4508 return pci_wait_for_pending(dev, pci_pcie_cap(dev) + PCI_EXP_DEVSTA,
4509 PCI_EXP_DEVSTA_TRPND);
4510 }
4511 EXPORT_SYMBOL(pci_wait_for_pending_transaction);
4512
4513 /**
4514 * pcie_flr - initiate a PCIe function level reset
4515 * @dev: device to reset
4516 *
4517 * Initiate a function level reset unconditionally on @dev, without
4518 * checking any flags or DEVCAP.
4519 */
4520 int pcie_flr(struct pci_dev *dev)
4521 {
4522 if (!pci_wait_for_pending_transaction(dev))
4523 pci_err(dev, "timed out waiting for pending transaction; performing function level reset anyway\n");
4524
4525 pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
4526
4527 if (dev->imm_ready)
4528 return 0;
4529
4530 /*
4531 * Per PCIe r4.0, sec 6.6.2, a device must complete an FLR within
4532 * 100ms, but may silently discard requests while the FLR is in
4533 * progress. Wait 100ms before trying to access the device.
4534 */
4535 msleep(100);
4536
4537 return pci_dev_wait(dev, "FLR", PCIE_RESET_READY_POLL_MS);
4538 }
4539 EXPORT_SYMBOL_GPL(pcie_flr);
4540
4541 /**
4542 * pcie_reset_flr - initiate a PCIe function level reset
4543 * @dev: device to reset
4544 * @probe: if true, return 0 if device can be reset this way
4545 *
4546 * Initiate a function level reset on @dev.
4547 */
4548 int pcie_reset_flr(struct pci_dev *dev, bool probe)
4549 {
4550 if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET)
4551 return -ENOTTY;
4552
4553 if (!(dev->devcap & PCI_EXP_DEVCAP_FLR))
4554 return -ENOTTY;
4555
4556 if (probe)
4557 return 0;
4558
4559 return pcie_flr(dev);
4560 }
4561 EXPORT_SYMBOL_GPL(pcie_reset_flr);
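
/*
 * Illustrative sketch: probing for FLR support before committing to the
 * reset, using the probe/do-reset convention of the reset methods in
 * this file. The "foo" name is hypothetical.
 */
static int foo_try_flr(struct pci_dev *pdev)
{
	if (pcie_reset_flr(pdev, PCI_RESET_PROBE))
		return -ENOTTY;		/* FLR unavailable on this device */

	return pcie_reset_flr(pdev, PCI_RESET_DO_RESET);
}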
4562
4563 static int pci_af_flr(struct pci_dev *dev, bool probe)
4564 {
4565 int pos;
4566 u8 cap;
4567
4568 pos = pci_find_capability(dev, PCI_CAP_ID_AF);
4569 if (!pos)
4570 return -ENOTTY;
4571
4572 if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET)
4573 return -ENOTTY;
4574
4575 pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap);
4576 if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
4577 return -ENOTTY;
4578
4579 if (probe)
4580 return 0;
4581
4582 /*
4583 * Wait for Transaction Pending bit to clear. A word-aligned test
4584 * is used, so we use the control offset rather than status and shift
4585 * the test bit to match.
4586 */
4587 if (!pci_wait_for_pending(dev, pos + PCI_AF_CTRL,
4588 PCI_AF_STATUS_TP << 8))
4589 pci_err(dev, "timed out waiting for pending transaction; performing AF function level reset anyway\n");
4590
4591 pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
4592
4593 if (dev->imm_ready)
4594 return 0;
4595
4596 /*
4597 * Per Advanced Capabilities for Conventional PCI ECN, 13 April 2006,
4598 * updated 27 July 2006; a device must complete an FLR within
4599 * 100ms, but may silently discard requests while the FLR is in
4600 * progress. Wait 100ms before trying to access the device.
4601 */
4602 msleep(100);
4603
4604 return pci_dev_wait(dev, "AF_FLR", PCIE_RESET_READY_POLL_MS);
4605 }
4606
4607 /**
4608 * pci_pm_reset - Put device into PCI_D3 and back into PCI_D0.
4609 * @dev: Device to reset.
4610 * @probe: if true, return 0 if the device can be reset this way.
4611 *
4612 * If @dev supports native PCI PM and its PCI_PM_CTRL_NO_SOFT_RESET flag is
4613 * unset, it will be reinitialized internally when going from PCI_D3hot to
4614 * PCI_D0. If that's the case and the device is not in a low-power state
4615 * already, force it into PCI_D3hot and back to PCI_D0, causing it to be reset.
4616 *
4617 * NOTE: This causes the caller to sleep for twice the device power transition
4618 * cooldown period, which for the D0->D3hot and D3hot->D0 transitions is 10 ms
4619 * by default (i.e. unless the @dev's d3hot_delay field has a different value).
4620 * Moreover, only devices in D0 can be reset by this function.
4621 */
4622 static int pci_pm_reset(struct pci_dev *dev, bool probe)
4623 {
4624 u16 csr;
4625
4626 if (!dev->pm_cap || dev->dev_flags & PCI_DEV_FLAGS_NO_PM_RESET)
4627 return -ENOTTY;
4628
4629 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr);
4630 if (csr & PCI_PM_CTRL_NO_SOFT_RESET)
4631 return -ENOTTY;
4632
4633 if (probe)
4634 return 0;
4635
4636 if (dev->current_state != PCI_D0)
4637 return -EINVAL;
4638
4639 csr &= ~PCI_PM_CTRL_STATE_MASK;
4640 csr |= PCI_D3hot;
4641 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
4642 pci_dev_d3_sleep(dev);
4643
4644 csr &= ~PCI_PM_CTRL_STATE_MASK;
4645 csr |= PCI_D0;
4646 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
4647 pci_dev_d3_sleep(dev);
4648
4649 return pci_dev_wait(dev, "PM D3hot->D0", PCIE_RESET_READY_POLL_MS);
4650 }
4651
4652 /**
4653 * pcie_wait_for_link_status - Wait for link status change
4654 * @pdev: Device whose link to wait for.
4655 * @use_lt: Use the LT bit if TRUE, or the DLLLA bit if FALSE.
4656 * @active: Waiting for active or inactive?
4657 *
4658 * Return 0 if successful, or -ETIMEDOUT if status has not changed within
4659 * PCIE_LINK_RETRAIN_TIMEOUT_MS milliseconds.
4660 */
4661 static int pcie_wait_for_link_status(struct pci_dev *pdev,
4662 bool use_lt, bool active)
4663 {
4664 u16 lnksta_mask, lnksta_match;
4665 unsigned long end_jiffies;
4666 u16 lnksta;
4667
4668 lnksta_mask = use_lt ? PCI_EXP_LNKSTA_LT : PCI_EXP_LNKSTA_DLLLA;
4669 lnksta_match = active ? lnksta_mask : 0;
4670
4671 end_jiffies = jiffies + msecs_to_jiffies(PCIE_LINK_RETRAIN_TIMEOUT_MS);
4672 do {
4673 pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnksta);
4674 if ((lnksta & lnksta_mask) == lnksta_match)
4675 return 0;
4676 msleep(1);
4677 } while (time_before(jiffies, end_jiffies));
4678
4679 return -ETIMEDOUT;
4680 }
4681
4682 /**
4683 * pcie_retrain_link - Request a link retrain and wait for it to complete
4684 * @pdev: Device whose link to retrain.
4685 * @use_lt: Use the LT bit if TRUE, or the DLLLA bit if FALSE, for status.
4686 *
4687 * Trigger retraining of the PCIe Link and wait for the completion of the
4688 * retraining. As link retraining is known to assert LBMS and may change
4689 * the Link Speed, LBMS is cleared after the retraining, and the Link Speed
4690 * of the subordinate bus is updated.
4691 *
4692 * Retrain completion status is retrieved from the Link Status Register
4693 * according to @use_lt. It is not verified whether the use of the DLLLA
4694 * bit is valid.
4695 *
4696 * Return 0 if successful, or -ETIMEDOUT if training has not completed
4697 * within PCIE_LINK_RETRAIN_TIMEOUT_MS milliseconds.
4698 */
4699 int pcie_retrain_link(struct pci_dev *pdev, bool use_lt)
4700 {
4701 int rc;
4702
4703 /*
4704 * Ensure the updated LNKCTL parameters are used during link
4705 * training by checking that there is no ongoing link training that
4706 * may have started before link parameters were changed, so as to
4707 * avoid LTSSM race as recommended in Implementation Note at the end
4708 * of PCIe r6.1 sec 7.5.3.7.
4709 */
4710 rc = pcie_wait_for_link_status(pdev, true, false);
4711 if (rc)
4712 return rc;
4713
4714 pcie_capability_set_word(pdev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_RL);
4715 if (pdev->clear_retrain_link) {
4716 /*
4717 * Due to an erratum in some devices the Retrain Link bit
4718 * needs to be cleared again manually to allow the link
4719 * training to succeed.
4720 */
4721 pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_RL);
4722 }
4723
4724 rc = pcie_wait_for_link_status(pdev, use_lt, !use_lt);
4725
4726 /*
4727 * Clear LBMS after a manual retrain so that the bit can be used
4728 * to track link speed or width changes made by hardware itself
4729 * in attempt to correct unreliable link operation.
4730 */
4731 pcie_reset_lbms(pdev);
4732
4733 /*
4734 * Ensure the Link Speed updates after retraining in case the Link
4735 * Speed was changed because of the retraining. While the bwctrl's
4736 * IRQ handler normally picks up the new Link Speed, clearing LBMS
4737 * races with the IRQ handler reading the Link Status register and
4738 * can result in the handler returning early without updating the
4739 * Link Speed.
4740 */
4741 if (pdev->subordinate)
4742 pcie_update_link_speed(pdev->subordinate);
4743
4744 return rc;
4745 }
4746
4747 /**
4748 * pcie_wait_for_link_delay - Wait until link is active or inactive
4749 * @pdev: Bridge device
4750 * @active: waiting for active or inactive?
4751 * @delay: Delay to wait after link has become active (in ms)
4752 *
4753 * Use this to wait till link becomes active or inactive.
4754 */
4755 static bool pcie_wait_for_link_delay(struct pci_dev *pdev, bool active,
4756 int delay)
4757 {
4758 int rc;
4759
4760 /*
4761 * Some controllers might not implement link active reporting. In this
4762 * case, we wait for 1000 ms + any delay requested by the caller.
4763 */
4764 if (!pdev->link_active_reporting) {
4765 msleep(PCIE_LINK_RETRAIN_TIMEOUT_MS + delay);
4766 return true;
4767 }
4768
4769 /*
4770 * PCIe r4.0 sec 6.6.1, a component must enter LTSSM Detect within 20ms,
4771 * after which we should expect the link to be active if the reset was
4772 * successful. If so, software must wait a minimum 100ms before sending
4773 * configuration requests to devices downstream this port.
4774 *
4775 * If the link fails to activate, either the device was physically
4776 * removed or the link is permanently failed.
4777 */
4778 if (active)
4779 msleep(20);
4780 rc = pcie_wait_for_link_status(pdev, false, active);
4781 if (active) {
4782 if (rc)
4783 rc = pcie_failed_link_retrain(pdev);
4784 if (rc)
4785 return false;
4786
4787 msleep(delay);
4788 return true;
4789 }
4790
4791 if (rc)
4792 return false;
4793
4794 return true;
4795 }
4796
4797 /**
4798 * pcie_wait_for_link - Wait until link is active or inactive
4799 * @pdev: Bridge device
4800 * @active: waiting for active or inactive?
4801 *
4802 * Use this to wait till link becomes active or inactive.
4803 */
4804 bool pcie_wait_for_link(struct pci_dev *pdev, bool active)
4805 {
4806 return pcie_wait_for_link_delay(pdev, active, 100);
4807 }
4808
4809 /*
4810 * Find maximum D3cold delay required by all the devices on the bus. The
4811 * spec says 100 ms, but firmware can lower it and we allow drivers to
4812 * increase it as well.
4813 *
4814 * Called with @pci_bus_sem locked for reading.
4815 */
4816 static int pci_bus_max_d3cold_delay(const struct pci_bus *bus)
4817 {
4818 const struct pci_dev *pdev;
4819 int min_delay = 100;
4820 int max_delay = 0;
4821
4822 list_for_each_entry(pdev, &bus->devices, bus_list) {
4823 if (pdev->d3cold_delay < min_delay)
4824 min_delay = pdev->d3cold_delay;
4825 if (pdev->d3cold_delay > max_delay)
4826 max_delay = pdev->d3cold_delay;
4827 }
4828
4829 return max(min_delay, max_delay);
4830 }
4831
4832 /**
4833 * pci_bridge_wait_for_secondary_bus - Wait for secondary bus to be accessible
4834 * @dev: PCI bridge
4835 * @reset_type: reset type in human-readable form
4836 *
4837 * Handle necessary delays before access to the devices on the secondary
4838 * side of the bridge are permitted after D3cold to D0 transition
4839 * or Conventional Reset.
4840 *
4841 * For PCIe this means the delays in PCIe 5.0 section 6.6.1. For
4842 * conventional PCI it means Tpvrh + Trhfa specified in PCI 3.0 section
4843 * 4.3.2.
4844 *
4845 * Return 0 on success or -ENOTTY if the first device on the secondary bus
4846 * failed to become accessible.
4847 */
4848 int pci_bridge_wait_for_secondary_bus(struct pci_dev *dev, char *reset_type)
4849 {
4850 struct pci_dev *child __free(pci_dev_put) = NULL;
4851 int delay;
4852
4853 if (pci_dev_is_disconnected(dev))
4854 return 0;
4855
4856 if (!pci_is_bridge(dev))
4857 return 0;
4858
4859 down_read(&pci_bus_sem);
4860
4861 /*
4862 * We only deal with devices that are present currently on the bus.
4863 * For any hot-added devices the access delay is handled in pciehp
4864 * board_added(). In case of ACPI hotplug the firmware is expected
4865 * to configure the devices before OS is notified.
4866 */
4867 if (!dev->subordinate || list_empty(&dev->subordinate->devices)) {
4868 up_read(&pci_bus_sem);
4869 return 0;
4870 }
4871
4872 /* Take d3cold_delay requirements into account */
4873 delay = pci_bus_max_d3cold_delay(dev->subordinate);
4874 if (!delay) {
4875 up_read(&pci_bus_sem);
4876 return 0;
4877 }
4878
4879 child = pci_dev_get(list_first_entry(&dev->subordinate->devices,
4880 struct pci_dev, bus_list));
4881 up_read(&pci_bus_sem);
4882
4883 /*
4884 * For conventional PCI and PCI-X, we need to wait Tpvrh + Trhfa before
4885 * accessing the device after reset (that is 1000 ms + 100 ms).
4886 */
4887 if (!pci_is_pcie(dev)) {
4888 pci_dbg(dev, "waiting %d ms for secondary bus\n", 1000 + delay);
4889 msleep(1000 + delay);
4890 return 0;
4891 }
4892
4893 /*
4894 * PCIe downstream and root ports that do not support speeds
4895 * greater than 5 GT/s need to wait a minimum of 100 ms. For higher
4896 * speeds (Gen3 and above) we first need to wait for the Data Link
4897 * Layer to become active.
4898 *
4899 * However, 100 ms is the minimum and the PCIe spec says the
4900 * software must allow at least 1s before it can determine that the
4901 * device that did not respond is a broken device. Also device can
4902 * take longer than that to respond if it indicates so through Request
4903 * Retry Status completions.
4904 *
4905 * Therefore we wait for 100 ms and check for the device presence
4906 * until the timeout expires.
4907 */
4908 if (!pcie_downstream_port(dev))
4909 return 0;
4910
4911 if (pcie_get_speed_cap(dev) <= PCIE_SPEED_5_0GT) {
4912 u16 status;
4913
4914 pci_dbg(dev, "waiting %d ms for downstream link\n", delay);
4915 msleep(delay);
4916
4917 if (!pci_dev_wait(child, reset_type, PCI_RESET_WAIT - delay))
4918 return 0;
4919
4920 /*
4921 * If the port supports active link reporting we now check
4922 * whether the link is active and if not bail out early with
4923 * the assumption that the device is not present anymore.
4924 */
4925 if (!dev->link_active_reporting)
4926 return -ENOTTY;
4927
4928 pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &status);
4929 if (!(status & PCI_EXP_LNKSTA_DLLLA))
4930 return -ENOTTY;
4931
4932 return pci_dev_wait(child, reset_type,
4933 PCIE_RESET_READY_POLL_MS - PCI_RESET_WAIT);
4934 }
4935
4936 pci_dbg(dev, "waiting %d ms for downstream link, after activation\n",
4937 delay);
4938 if (!pcie_wait_for_link_delay(dev, true, delay)) {
4939 /* Did not train, no need to wait any further */
4940 pci_info(dev, "Data Link Layer Link Active not set in %d msec\n", delay);
4941 return -ENOTTY;
4942 }
4943
4944 return pci_dev_wait(child, reset_type,
4945 PCIE_RESET_READY_POLL_MS - delay);
4946 }
4947
4948 void pci_reset_secondary_bus(struct pci_dev *dev)
4949 {
4950 u16 ctrl;
4951
4952 pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &ctrl);
4953 ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
4954 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
4955
4956 /*
4957 * PCI spec v3.0 7.6.4.2 requires minimum Trst of 1ms. Double
4958 * this to 2ms to ensure that we meet the minimum requirement.
4959 */
4960 msleep(2);
4961
4962 ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
4963 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
4964 }
4965
4966 void __weak pcibios_reset_secondary_bus(struct pci_dev *dev)
4967 {
4968 pci_reset_secondary_bus(dev);
4969 }
4970
4971 /**
4972 * pci_bridge_secondary_bus_reset - Reset the secondary bus on a PCI bridge.
4973 * @dev: Bridge device
4974 *
4975 * Use the bridge control register to assert reset on the secondary bus.
4976 * Devices on the secondary bus are left in power-on state.
4977 */
4978 int pci_bridge_secondary_bus_reset(struct pci_dev *dev)
4979 {
4980 if (!dev->block_cfg_access)
4981 pci_warn_once(dev, "unlocked secondary bus reset via: %pS\n",
4982 __builtin_return_address(0));
4983 pcibios_reset_secondary_bus(dev);
4984
4985 return pci_bridge_wait_for_secondary_bus(dev, "bus reset");
4986 }
4987 EXPORT_SYMBOL_GPL(pci_bridge_secondary_bus_reset);
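
/*
 * Illustrative sketch: a secondary bus reset is only safe when the
 * devices below the bridge are quiesced; a hypothetical caller locks
 * the bridge first, which also blocks config space access and thereby
 * satisfies the warning check above. The "foo" name is hypothetical.
 */
static int foo_reset_downstream(struct pci_dev *bridge)
{
	int rc;

	pci_dev_lock(bridge);	/* also sets block_cfg_access */
	rc = pci_bridge_secondary_bus_reset(bridge);
	pci_dev_unlock(bridge);

	return rc;
}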
4988
4989 static int pci_parent_bus_reset(struct pci_dev *dev, bool probe)
4990 {
4991 struct pci_dev *pdev;
4992
4993 if (pci_is_root_bus(dev->bus) || dev->subordinate ||
4994 !dev->bus->self || dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
4995 return -ENOTTY;
4996
4997 list_for_each_entry(pdev, &dev->bus->devices, bus_list)
4998 if (pdev != dev)
4999 return -ENOTTY;
5000
5001 if (probe)
5002 return 0;
5003
5004 return pci_bridge_secondary_bus_reset(dev->bus->self);
5005 }
5006
5007 static int pci_reset_hotplug_slot(struct hotplug_slot *hotplug, bool probe)
5008 {
5009 int rc = -ENOTTY;
5010
5011 if (!hotplug || !try_module_get(hotplug->owner))
5012 return rc;
5013
5014 if (hotplug->ops->reset_slot)
5015 rc = hotplug->ops->reset_slot(hotplug, probe);
5016
5017 module_put(hotplug->owner);
5018
5019 return rc;
5020 }
5021
5022 static int pci_dev_reset_slot_function(struct pci_dev *dev, bool probe)
5023 {
5024 if (dev->multifunction || dev->subordinate || !dev->slot ||
5025 dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
5026 return -ENOTTY;
5027
5028 return pci_reset_hotplug_slot(dev->slot->hotplug, probe);
5029 }
5030
5031 static u16 cxl_port_dvsec(struct pci_dev *dev)
5032 {
5033 return pci_find_dvsec_capability(dev, PCI_VENDOR_ID_CXL,
5034 PCI_DVSEC_CXL_PORT);
5035 }
5036
5037 static bool cxl_sbr_masked(struct pci_dev *dev)
5038 {
5039 u16 dvsec, reg;
5040 int rc;
5041
5042 dvsec = cxl_port_dvsec(dev);
5043 if (!dvsec)
5044 return false;
5045
5046 rc = pci_read_config_word(dev, dvsec + PCI_DVSEC_CXL_PORT_CTL, &reg);
5047 if (rc || PCI_POSSIBLE_ERROR(reg))
5048 return false;
5049
5050 /*
5051 * Per CXL spec r3.1, sec 8.1.5.2, when "Unmask SBR" is 0, the SBR
5052 * bit in Bridge Control has no effect. When 1, the Port generates
5053 * hot reset when the SBR bit is set to 1.
5054 */
5055 if (reg & PCI_DVSEC_CXL_PORT_CTL_UNMASK_SBR)
5056 return false;
5057
5058 return true;
5059 }
5060
5061 static int pci_reset_bus_function(struct pci_dev *dev, bool probe)
5062 {
5063 struct pci_dev *bridge = pci_upstream_bridge(dev);
5064 int rc;
5065
5066 /*
5067 * If "dev" is below a CXL port that has SBR control masked, SBR
5068 * won't do anything, so return error.
5069 */
5070 if (bridge && cxl_sbr_masked(bridge)) {
5071 if (probe)
5072 return 0;
5073
5074 return -ENOTTY;
5075 }
5076
5077 rc = pci_dev_reset_slot_function(dev, probe);
5078 if (rc != -ENOTTY)
5079 return rc;
5080 return pci_parent_bus_reset(dev, probe);
5081 }
5082
5083 static int cxl_reset_bus_function(struct pci_dev *dev, bool probe)
5084 {
5085 struct pci_dev *bridge;
5086 u16 dvsec, reg, val;
5087 int rc;
5088
5089 bridge = pci_upstream_bridge(dev);
5090 if (!bridge)
5091 return -ENOTTY;
5092
5093 dvsec = cxl_port_dvsec(bridge);
5094 if (!dvsec)
5095 return -ENOTTY;
5096
5097 if (probe)
5098 return 0;
5099
5100 rc = pci_read_config_word(bridge, dvsec + PCI_DVSEC_CXL_PORT_CTL, &reg);
5101 if (rc)
5102 return -ENOTTY;
5103
5104 if (reg & PCI_DVSEC_CXL_PORT_CTL_UNMASK_SBR) {
5105 val = reg;
5106 } else {
5107 val = reg | PCI_DVSEC_CXL_PORT_CTL_UNMASK_SBR;
5108 pci_write_config_word(bridge, dvsec + PCI_DVSEC_CXL_PORT_CTL,
5109 val);
5110 }
5111
5112 rc = pci_reset_bus_function(dev, probe);
5113
5114 if (reg != val)
5115 pci_write_config_word(bridge, dvsec + PCI_DVSEC_CXL_PORT_CTL,
5116 reg);
5117
5118 return rc;
5119 }
5120
5121 void pci_dev_lock(struct pci_dev *dev)
5122 {
5123 /* block PM suspend, driver probe, etc. */
5124 device_lock(&dev->dev);
5125 pci_cfg_access_lock(dev);
5126 }
5127 EXPORT_SYMBOL_GPL(pci_dev_lock);
5128
5129 /* Return 1 on successful lock, 0 on contention */
5130 int pci_dev_trylock(struct pci_dev *dev)
5131 {
5132 if (device_trylock(&dev->dev)) {
5133 if (pci_cfg_access_trylock(dev))
5134 return 1;
5135 device_unlock(&dev->dev);
5136 }
5137
5138 return 0;
5139 }
5140 EXPORT_SYMBOL_GPL(pci_dev_trylock);
5141
5142 void pci_dev_unlock(struct pci_dev *dev)
5143 {
5144 pci_cfg_access_unlock(dev);
5145 device_unlock(&dev->dev);
5146 }
5147 EXPORT_SYMBOL_GPL(pci_dev_unlock);
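
/*
 * Illustrative sketch: the trylock pattern for contexts that must not
 * block, e.g. a sysfs handler racing with driver unbind; the caller
 * falls back or retries on contention. The "foo" name is hypothetical.
 */
static int foo_touch_device(struct pci_dev *pdev)
{
	if (!pci_dev_trylock(pdev))
		return -EAGAIN;		/* contended; try again later */

	/* ... config space and device state may be accessed here ... */

	pci_dev_unlock(pdev);
	return 0;
}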
5148
5149 static void pci_dev_save_and_disable(struct pci_dev *dev)
5150 {
5151 const struct pci_error_handlers *err_handler =
5152 dev->driver ? dev->driver->err_handler : NULL;
5153
5154 /*
5155 * dev->driver->err_handler->reset_prepare() is protected against
5156 * races with ->remove() by the device lock, which must be held by
5157 * the caller.
5158 */
5159 if (err_handler && err_handler->reset_prepare)
5160 err_handler->reset_prepare(dev);
5161 else if (dev->driver)
5162 pci_warn(dev, "resetting");
5163
5164 /*
5165 * Wake-up device prior to save. PM registers default to D0 after
5166 * reset and a simple register restore doesn't reliably return
5167 * to a non-D0 state anyway.
5168 */
5169 pci_set_power_state(dev, PCI_D0);
5170
5171 pci_save_state(dev);
5172 /*
5173 * Disable the device by clearing the Command register, except for
5174 * INTx-disable which is set. This not only disables MMIO and I/O port
5175 * BARs, but also prevents the device from being Bus Master, preventing
5176 * DMA from the device including MSI/MSI-X interrupts. For PCI 2.3
5177 * compliant devices, INTx-disable prevents legacy interrupts.
5178 */
5179 pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
5180 }
5181
5182 static void pci_dev_restore(struct pci_dev *dev)
5183 {
5184 const struct pci_error_handlers *err_handler =
5185 dev->driver ? dev->driver->err_handler : NULL;
5186
5187 pci_restore_state(dev);
5188
5189 /*
5190 * dev->driver->err_handler->reset_done() is protected against
5191 * races with ->remove() by the device lock, which must be held by
5192 * the caller.
5193 */
5194 if (err_handler && err_handler->reset_done)
5195 err_handler->reset_done(dev);
5196 else if (dev->driver)
5197 pci_warn(dev, "reset done");
5198 }
5199
5200 /* dev->reset_methods[] is a 0-terminated list of indices into this array */
5201 const struct pci_reset_fn_method pci_reset_fn_methods[] = {
5202 { },
5203 { pci_dev_specific_reset, .name = "device_specific" },
5204 { pci_dev_acpi_reset, .name = "acpi" },
5205 { pcie_reset_flr, .name = "flr" },
5206 { pci_af_flr, .name = "af_flr" },
5207 { pci_pm_reset, .name = "pm" },
5208 { pci_reset_bus_function, .name = "bus" },
5209 { cxl_reset_bus_function, .name = "cxl_bus" },
5210 };
5211
5212 /**
5213 * __pci_reset_function_locked - reset a PCI device function while holding
5214 * the @dev mutex lock.
5215 * @dev: PCI device to reset
5216 *
5217 * Some devices allow an individual function to be reset without affecting
5218 * other functions in the same device. The PCI device must be responsive
5219 * to PCI config space in order to use this function.
5220 *
5221 * The device function is presumed to be unused and the caller is holding
5222 * the device mutex lock when this function is called.
5223 *
5224 * Resetting the device will make the contents of PCI configuration space
5225 * random, so any caller of this must be prepared to reinitialise the
5226 * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
5227 * etc.
5228 *
5229 * Returns 0 if the device function was successfully reset or negative if the
5230 * device doesn't support resetting a single function.
5231 */
5232 int __pci_reset_function_locked(struct pci_dev *dev)
5233 {
5234 int i, m, rc;
5235 const struct pci_reset_fn_method *method;
5236
5237 might_sleep();
5238
5239 /*
5240 * A reset method returns -ENOTTY if it doesn't support this device and
5241 * we should try the next method.
5242 *
5243 * If it returns 0 (success), we're finished. If it returns any other
5244 * error, we're also finished: this indicates that further reset
5245 * mechanisms might be broken on the device.
5246 */
5247 for (i = 0; i < PCI_NUM_RESET_METHODS; i++) {
5248 m = dev->reset_methods[i];
5249 if (!m)
5250 return -ENOTTY;
5251
5252 method = &pci_reset_fn_methods[m];
5253 pci_dbg(dev, "reset via %s\n", method->name);
5254 rc = method->reset_fn(dev, PCI_RESET_DO_RESET);
5255 if (!rc)
5256 return 0;
5257
5258 pci_dbg(dev, "%s failed with %d\n", method->name, rc);
5259 if (rc != -ENOTTY)
5260 return rc;
5261 }
5262
5263 return -ENOTTY;
5264 }
5265 EXPORT_SYMBOL_GPL(__pci_reset_function_locked);
5266
5267 /**
5268 * pci_init_reset_methods - check whether device can be safely reset
5269 * and store supported reset mechanisms.
5270 * @dev: PCI device to check for reset mechanisms
5271 *
5272 * Some devices allow an individual function to be reset without affecting
5273 * other functions in the same device. The PCI device must be in D0-D3hot
5274 * state.
5275 *
5276 * Stores reset mechanisms supported by device in reset_methods byte array
5277 * which is a member of struct pci_dev.
5278 */
pci_init_reset_methods(struct pci_dev * dev)5279 void pci_init_reset_methods(struct pci_dev *dev)
5280 {
5281 int m, i, rc;
5282
5283 BUILD_BUG_ON(ARRAY_SIZE(pci_reset_fn_methods) != PCI_NUM_RESET_METHODS);
5284
5285 might_sleep();
5286
5287 i = 0;
5288 for (m = 1; m < PCI_NUM_RESET_METHODS; m++) {
5289 rc = pci_reset_fn_methods[m].reset_fn(dev, PCI_RESET_PROBE);
5290 if (!rc)
5291 dev->reset_methods[i++] = m;
5292 else if (rc != -ENOTTY)
5293 break;
5294 }
5295
5296 dev->reset_methods[i] = 0;
5297 }
5298
5299 /**
5300 * pci_reset_function - quiesce and reset a PCI device function
5301 * @dev: PCI device to reset
5302 *
5303 * Some devices allow an individual function to be reset without affecting
5304 * other functions in the same device. The PCI device must be responsive
5305 * to PCI config space in order to use this function.
5306 *
5307 * This function does not just reset the PCI portion of a device, but
5308 * clears all the state associated with the device. This function differs
5309 * from __pci_reset_function_locked() in that it saves and restores device state
5310 * over the reset and takes the PCI device lock.
5311 *
5312 * Returns 0 if the device function was successfully reset or negative if the
5313 * device doesn't support resetting a single function.
5314 */
pci_reset_function(struct pci_dev * dev)5315 int pci_reset_function(struct pci_dev *dev)
5316 {
5317 struct pci_dev *bridge;
5318 int rc;
5319
5320 if (!pci_reset_supported(dev))
5321 return -ENOTTY;
5322
5323 /*
5324 * If there's no upstream bridge, no locking is needed since there is
5325 * no upstream bridge configuration to hold consistent.
5326 */
5327 bridge = pci_upstream_bridge(dev);
5328 if (bridge)
5329 pci_dev_lock(bridge);
5330
5331 pci_dev_lock(dev);
5332 pci_dev_save_and_disable(dev);
5333
5334 rc = __pci_reset_function_locked(dev);
5335
5336 pci_dev_restore(dev);
5337 pci_dev_unlock(dev);
5338
5339 if (bridge)
5340 pci_dev_unlock(bridge);
5341
5342 return rc;
5343 }
5344 EXPORT_SYMBOL_GPL(pci_reset_function);
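
/*
 * Illustrative sketch added by the editor, not part of the original file:
 * a hypothetical driver recovery path built on pci_reset_function(). It
 * only demonstrates the calling convention: no device lock held, and
 * -ENOTTY means no reset method applies to this device.
 */
static int __maybe_unused example_recover(struct pci_dev *pdev)
{
	int rc = pci_reset_function(pdev);

	if (rc)
		pci_err(pdev, "recovery reset failed: %d\n", rc);

	return rc;
}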

/**
 * pci_reset_function_locked - quiesce and reset a PCI device function
 * @dev: PCI device to reset
 *
 * Some devices allow an individual function to be reset without affecting
 * other functions in the same device. The PCI device must be responsive
 * to PCI config space in order to use this function.
 *
 * This function does not just reset the PCI portion of a device, but
 * clears all the state associated with the device. This function differs
 * from __pci_reset_function_locked() in that it saves and restores device state
 * over the reset. It also differs from pci_reset_function() in that it
 * requires the PCI device lock to be held.
 *
 * Returns 0 if the device function was successfully reset or negative if the
 * device doesn't support resetting a single function.
 */
int pci_reset_function_locked(struct pci_dev *dev)
{
	int rc;

	if (!pci_reset_supported(dev))
		return -ENOTTY;

	pci_dev_save_and_disable(dev);

	rc = __pci_reset_function_locked(dev);

	pci_dev_restore(dev);

	return rc;
}
EXPORT_SYMBOL_GPL(pci_reset_function_locked);

/**
 * pci_try_reset_function - quiesce and reset a PCI device function
 * @dev: PCI device to reset
 *
 * Same as above, except return -EAGAIN if unable to lock device.
 */
int pci_try_reset_function(struct pci_dev *dev)
{
	int rc;

	if (!pci_reset_supported(dev))
		return -ENOTTY;

	if (!pci_dev_trylock(dev))
		return -EAGAIN;

	pci_dev_save_and_disable(dev);
	rc = __pci_reset_function_locked(dev);
	pci_dev_restore(dev);
	pci_dev_unlock(dev);

	return rc;
}
EXPORT_SYMBOL_GPL(pci_try_reset_function);
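
/*
 * Editor's sketch (hypothetical caller): pci_try_reset_function() suits
 * paths that must not block on the device lock. A bounded retry on
 * -EAGAIN is one reasonable policy; the count and delay here are
 * illustrative, not prescriptive.
 */
static int __maybe_unused example_try_reset(struct pci_dev *pdev)
{
	int i, rc = -EAGAIN;

	for (i = 0; i < 5 && rc == -EAGAIN; i++) {
		rc = pci_try_reset_function(pdev);
		if (rc == -EAGAIN)
			msleep(10);
	}

	return rc;
}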

/* Do any devices on or below this bus prevent a bus reset? */
static bool pci_bus_resettable(struct pci_bus *bus)
{
	struct pci_dev *dev;

	if (bus->self && (bus->self->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET))
		return false;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
		    (dev->subordinate && !pci_bus_resettable(dev->subordinate)))
			return false;
	}

	return true;
}

/* Lock devices from the top of the tree down */
static void pci_bus_lock(struct pci_bus *bus)
{
	struct pci_dev *dev;

	pci_dev_lock(bus->self);
	list_for_each_entry(dev, &bus->devices, bus_list) {
		if (dev->subordinate)
			pci_bus_lock(dev->subordinate);
		else
			pci_dev_lock(dev);
	}
}

/* Unlock devices from the bottom of the tree up */
static void pci_bus_unlock(struct pci_bus *bus)
{
	struct pci_dev *dev;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		if (dev->subordinate)
			pci_bus_unlock(dev->subordinate);
		else
			pci_dev_unlock(dev);
	}
	pci_dev_unlock(bus->self);
}

/* Return 1 on successful lock, 0 on contention */
static int pci_bus_trylock(struct pci_bus *bus)
{
	struct pci_dev *dev;

	if (!pci_dev_trylock(bus->self))
		return 0;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		if (dev->subordinate) {
			if (!pci_bus_trylock(dev->subordinate))
				goto unlock;
		} else if (!pci_dev_trylock(dev))
			goto unlock;
	}
	return 1;

unlock:
	list_for_each_entry_continue_reverse(dev, &bus->devices, bus_list) {
		if (dev->subordinate)
			pci_bus_unlock(dev->subordinate);
		else
			pci_dev_unlock(dev);
	}
	pci_dev_unlock(bus->self);
	return 0;
}

/* Do any devices on or below this slot prevent a bus reset? */
static bool pci_slot_resettable(struct pci_slot *slot)
{
	struct pci_dev *dev;

	if (slot->bus->self &&
	    (slot->bus->self->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET))
		return false;

	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
		if (!dev->slot || dev->slot != slot)
			continue;
		if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
		    (dev->subordinate && !pci_bus_resettable(dev->subordinate)))
			return false;
	}

	return true;
}

/* Lock devices from the top of the tree down */
static void pci_slot_lock(struct pci_slot *slot)
{
	struct pci_dev *dev;

	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
		if (!dev->slot || dev->slot != slot)
			continue;
		if (dev->subordinate)
			pci_bus_lock(dev->subordinate);
		else
			pci_dev_lock(dev);
	}
}

/* Unlock devices from the bottom of the tree up */
static void pci_slot_unlock(struct pci_slot *slot)
{
	struct pci_dev *dev;

	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
		if (!dev->slot || dev->slot != slot)
			continue;
		if (dev->subordinate)
			pci_bus_unlock(dev->subordinate);
		else
			pci_dev_unlock(dev);
	}
}

/* Return 1 on successful lock, 0 on contention */
static int pci_slot_trylock(struct pci_slot *slot)
{
	struct pci_dev *dev;

	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
		if (!dev->slot || dev->slot != slot)
			continue;
		if (dev->subordinate) {
			if (!pci_bus_trylock(dev->subordinate)) {
				pci_dev_unlock(dev);
				goto unlock;
			}
		} else if (!pci_dev_trylock(dev))
			goto unlock;
	}
	return 1;

unlock:
	list_for_each_entry_continue_reverse(dev,
					     &slot->bus->devices, bus_list) {
		if (!dev->slot || dev->slot != slot)
			continue;
		if (dev->subordinate)
			pci_bus_unlock(dev->subordinate);
		else
			pci_dev_unlock(dev);
	}
	return 0;
}

/*
 * Save and disable devices from the top of the tree down while holding
 * the @dev mutex lock for the entire tree.
 */
static void pci_bus_save_and_disable_locked(struct pci_bus *bus)
{
	struct pci_dev *dev;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		pci_dev_save_and_disable(dev);
		if (dev->subordinate)
			pci_bus_save_and_disable_locked(dev->subordinate);
	}
}

/*
 * Restore devices from top of the tree down while holding @dev mutex lock
 * for the entire tree. Parent bridges need to be restored before we can
 * get to subordinate devices.
 */
static void pci_bus_restore_locked(struct pci_bus *bus)
{
	struct pci_dev *dev;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		pci_dev_restore(dev);
		if (dev->subordinate) {
			pci_bridge_wait_for_secondary_bus(dev, "bus reset");
			pci_bus_restore_locked(dev->subordinate);
		}
	}
}

/*
 * Save and disable devices from the top of the tree down while holding
 * the @dev mutex lock for the entire tree.
 */
static void pci_slot_save_and_disable_locked(struct pci_slot *slot)
{
	struct pci_dev *dev;

	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
		if (!dev->slot || dev->slot != slot)
			continue;
		pci_dev_save_and_disable(dev);
		if (dev->subordinate)
			pci_bus_save_and_disable_locked(dev->subordinate);
	}
}

/*
 * Restore devices from top of the tree down while holding @dev mutex lock
 * for the entire tree. Parent bridges need to be restored before we can
 * get to subordinate devices.
 */
static void pci_slot_restore_locked(struct pci_slot *slot)
{
	struct pci_dev *dev;

	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
		if (!dev->slot || dev->slot != slot)
			continue;
		pci_dev_restore(dev);
		if (dev->subordinate) {
			pci_bridge_wait_for_secondary_bus(dev, "slot reset");
			pci_bus_restore_locked(dev->subordinate);
		}
	}
}

static int pci_slot_reset(struct pci_slot *slot, bool probe)
{
	int rc;

	if (!slot || !pci_slot_resettable(slot))
		return -ENOTTY;

	if (!probe)
		pci_slot_lock(slot);

	might_sleep();

	rc = pci_reset_hotplug_slot(slot->hotplug, probe);

	if (!probe)
		pci_slot_unlock(slot);

	return rc;
}

/**
 * pci_probe_reset_slot - probe whether a PCI slot can be reset
 * @slot: PCI slot to probe
 *
 * Return 0 if slot can be reset, negative if a slot reset is not supported.
 */
int pci_probe_reset_slot(struct pci_slot *slot)
{
	return pci_slot_reset(slot, PCI_RESET_PROBE);
}
EXPORT_SYMBOL_GPL(pci_probe_reset_slot);

/**
 * __pci_reset_slot - Try to reset a PCI slot
 * @slot: PCI slot to reset
 *
 * A PCI bus may host multiple slots, each slot may support a reset mechanism
 * independent of other slots. For instance, some slots may support slot power
 * control. In the case of a 1:1 bus to slot architecture, this function may
 * wrap the bus reset to avoid spurious slot related events such as hotplug.
 * Generally a slot reset should be attempted before a bus reset. All of the
 * functions of the slot and any subordinate buses behind the slot are reset
 * through this function. PCI config space of all devices in the slot and
 * behind the slot is saved before and restored after reset.
 *
 * Same as above except return -EAGAIN if the slot cannot be locked
 */
static int __pci_reset_slot(struct pci_slot *slot)
{
	int rc;

	rc = pci_slot_reset(slot, PCI_RESET_PROBE);
	if (rc)
		return rc;

	if (pci_slot_trylock(slot)) {
		pci_slot_save_and_disable_locked(slot);
		might_sleep();
		rc = pci_reset_hotplug_slot(slot->hotplug, PCI_RESET_DO_RESET);
		pci_slot_restore_locked(slot);
		pci_slot_unlock(slot);
	} else
		rc = -EAGAIN;

	return rc;
}

static int pci_bus_reset(struct pci_bus *bus, bool probe)
{
	int ret;

	if (!bus->self || !pci_bus_resettable(bus))
		return -ENOTTY;

	if (probe)
		return 0;

	pci_bus_lock(bus);

	might_sleep();

	ret = pci_bridge_secondary_bus_reset(bus->self);

	pci_bus_unlock(bus);

	return ret;
}

/**
 * pci_bus_error_reset - reset the bridge's subordinate bus
 * @bridge: The parent device that connects to the bus to reset
 *
 * This function will first try to reset the slots on this bus if the method is
 * available. If slot reset fails or is not available, this will fall back to a
 * secondary bus reset.
 */
int pci_bus_error_reset(struct pci_dev *bridge)
{
	struct pci_bus *bus = bridge->subordinate;
	struct pci_slot *slot;

	if (!bus)
		return -ENOTTY;

	mutex_lock(&pci_slot_mutex);
	if (list_empty(&bus->slots))
		goto bus_reset;

	list_for_each_entry(slot, &bus->slots, list)
		if (pci_probe_reset_slot(slot))
			goto bus_reset;

	list_for_each_entry(slot, &bus->slots, list)
		if (pci_slot_reset(slot, PCI_RESET_DO_RESET))
			goto bus_reset;

	mutex_unlock(&pci_slot_mutex);
	return 0;
bus_reset:
	mutex_unlock(&pci_slot_mutex);
	return pci_bus_reset(bridge->subordinate, PCI_RESET_DO_RESET);
}

/**
 * pci_probe_reset_bus - probe whether a PCI bus can be reset
 * @bus: PCI bus to probe
 *
 * Return 0 if bus can be reset, negative if a bus reset is not supported.
 */
int pci_probe_reset_bus(struct pci_bus *bus)
{
	return pci_bus_reset(bus, PCI_RESET_PROBE);
}
EXPORT_SYMBOL_GPL(pci_probe_reset_bus);

/**
 * __pci_reset_bus - Try to reset a PCI bus
 * @bus: top level PCI bus to reset
 *
 * Same as above except return -EAGAIN if the bus cannot be locked
 */
int __pci_reset_bus(struct pci_bus *bus)
{
	int rc;

	rc = pci_bus_reset(bus, PCI_RESET_PROBE);
	if (rc)
		return rc;

	if (pci_bus_trylock(bus)) {
		pci_bus_save_and_disable_locked(bus);
		might_sleep();
		rc = pci_bridge_secondary_bus_reset(bus->self);
		pci_bus_restore_locked(bus);
		pci_bus_unlock(bus);
	} else
		rc = -EAGAIN;

	return rc;
}

/**
 * pci_reset_bus - Try to reset a PCI bus
 * @pdev: top level PCI device to reset via slot/bus
 *
 * Same as above except return -EAGAIN if the bus cannot be locked
 */
int pci_reset_bus(struct pci_dev *pdev)
{
	return (!pci_probe_reset_slot(pdev->slot)) ?
	    __pci_reset_slot(pdev->slot) : __pci_reset_bus(pdev->bus);
}
EXPORT_SYMBOL_GPL(pci_reset_bus);
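
/*
 * Editor's sketch (hypothetical caller): pci_reset_bus() is typically used
 * by owners of every function below a bridge, e.g. device-assignment
 * code. Probing first keeps an unresettable topology from being disturbed.
 */
static int __maybe_unused example_reset_whole_bus(struct pci_dev *pdev)
{
	if (pci_probe_reset_bus(pdev->bus))
		return -ENOTTY;

	return pci_reset_bus(pdev);
}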

/**
 * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count
 * @dev: PCI device to query
 *
 * Returns mmrbc: maximum designed memory read count in bytes or
 * appropriate error value.
 */
int pcix_get_max_mmrbc(struct pci_dev *dev)
{
	int cap;
	u32 stat;

	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!cap)
		return -EINVAL;

	if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
		return -EINVAL;

	return 512 << FIELD_GET(PCI_X_STATUS_MAX_READ, stat);
}
EXPORT_SYMBOL(pcix_get_max_mmrbc);

/**
 * pcix_get_mmrbc - get PCI-X maximum memory read byte count
 * @dev: PCI device to query
 *
 * Returns mmrbc: maximum memory read count in bytes or appropriate error
 * value.
 */
int pcix_get_mmrbc(struct pci_dev *dev)
{
	int cap;
	u16 cmd;

	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!cap)
		return -EINVAL;

	if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
		return -EINVAL;

	return 512 << FIELD_GET(PCI_X_CMD_MAX_READ, cmd);
}
EXPORT_SYMBOL(pcix_get_mmrbc);

/**
 * pcix_set_mmrbc - set PCI-X maximum memory read byte count
 * @dev: PCI device to query
 * @mmrbc: maximum memory read count in bytes
 *    valid values are 512, 1024, 2048, 4096
 *
 * If possible, sets the maximum memory read byte count; some bridges have
 * errata that prevent this.
 */
int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
{
	int cap;
	u32 stat, v, o;
	u16 cmd;

	if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc))
		return -EINVAL;

	v = ffs(mmrbc) - 10;

	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!cap)
		return -EINVAL;

	if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
		return -EINVAL;

	if (v > FIELD_GET(PCI_X_STATUS_MAX_READ, stat))
		return -E2BIG;

	if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
		return -EINVAL;

	o = FIELD_GET(PCI_X_CMD_MAX_READ, cmd);
	if (o != v) {
		if (v > o && (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC))
			return -EIO;

		cmd &= ~PCI_X_CMD_MAX_READ;
		cmd |= FIELD_PREP(PCI_X_CMD_MAX_READ, v);
		if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd))
			return -EIO;
	}
	return 0;
}
EXPORT_SYMBOL(pcix_set_mmrbc);
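
/*
 * Editor's sketch (hypothetical caller): raise the read byte count to the
 * device's designed maximum. pcix_get_max_mmrbc() already returns one of
 * the values pcix_set_mmrbc() accepts (512..4096, power of two), and the
 * setter refuses increases on bridges with MMRBC errata.
 */
static int __maybe_unused example_max_out_mmrbc(struct pci_dev *pdev)
{
	int max = pcix_get_max_mmrbc(pdev);

	if (max < 0)
		return max;

	return pcix_set_mmrbc(pdev, max);
}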

/**
 * pcie_get_readrq - get PCI Express read request size
 * @dev: PCI device to query
 *
 * Returns maximum memory read request in bytes or appropriate error value.
 */
int pcie_get_readrq(struct pci_dev *dev)
{
	u16 ctl;

	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);

	return 128 << FIELD_GET(PCI_EXP_DEVCTL_READRQ, ctl);
}
EXPORT_SYMBOL(pcie_get_readrq);

/**
 * pcie_set_readrq - set PCI Express maximum memory read request
 * @dev: PCI device to query
 * @rq: maximum memory read count in bytes
 *    valid values are 128, 256, 512, 1024, 2048, 4096
 *
 * If possible, sets the maximum memory read request in bytes.
 */
int pcie_set_readrq(struct pci_dev *dev, int rq)
{
	u16 v;
	int ret;
	struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);

	if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
		return -EINVAL;

	/*
	 * If using the "performance" PCIe config, we clamp the read rq
	 * size to the max packet size to keep the host bridge from
	 * generating requests larger than we can cope with.
	 */
	if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
		int mps = pcie_get_mps(dev);

		if (mps < rq)
			rq = mps;
	}

	v = FIELD_PREP(PCI_EXP_DEVCTL_READRQ, ffs(rq) - 8);

	if (bridge->no_inc_mrrs) {
		int max_mrrs = pcie_get_readrq(dev);

		if (rq > max_mrrs) {
			pci_info(dev, "can't set Max_Read_Request_Size to %d; max is %d\n", rq, max_mrrs);
			return -EINVAL;
		}
	}

	ret = pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
						 PCI_EXP_DEVCTL_READRQ, v);

	return pcibios_err_to_errno(ret);
}
EXPORT_SYMBOL(pcie_set_readrq);
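
/*
 * Editor's sketch (hypothetical caller): a driver lowering MRRS for a
 * latency-sensitive workload. On failure the value still in effect is
 * simply reported; 256 bytes is an illustrative choice.
 */
static void __maybe_unused example_lower_readrq(struct pci_dev *pdev)
{
	if (pcie_set_readrq(pdev, 256))
		pci_info(pdev, "MRRS left at %d bytes\n",
			 pcie_get_readrq(pdev));
}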

/**
 * pcie_get_mps - get PCI Express maximum payload size
 * @dev: PCI device to query
 *
 * Returns maximum payload size in bytes
 */
int pcie_get_mps(struct pci_dev *dev)
{
	u16 ctl;

	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);

	return 128 << FIELD_GET(PCI_EXP_DEVCTL_PAYLOAD, ctl);
}
EXPORT_SYMBOL(pcie_get_mps);

/**
 * pcie_set_mps - set PCI Express maximum payload size
 * @dev: PCI device to query
 * @mps: maximum payload size in bytes
 *    valid values are 128, 256, 512, 1024, 2048, 4096
 *
 * If possible, sets the maximum payload size.
 */
int pcie_set_mps(struct pci_dev *dev, int mps)
{
	u16 v;
	int ret;

	if (mps < 128 || mps > 4096 || !is_power_of_2(mps))
		return -EINVAL;

	v = ffs(mps) - 8;
	if (v > dev->pcie_mpss)
		return -EINVAL;
	v = FIELD_PREP(PCI_EXP_DEVCTL_PAYLOAD, v);

	ret = pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
						 PCI_EXP_DEVCTL_PAYLOAD, v);

	return pcibios_err_to_errno(ret);
}
EXPORT_SYMBOL(pcie_set_mps);
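
/*
 * Editor's sketch: MPS is normally negotiated by the PCI core at
 * enumeration time, so drivers rarely call pcie_set_mps() themselves. A
 * gentler use of the pair is keeping the read request size within the
 * payload size the link was configured for. Hypothetical helper.
 */
static int __maybe_unused example_match_readrq_to_mps(struct pci_dev *pdev)
{
	return pcie_set_readrq(pdev, pcie_get_mps(pdev));
}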

static enum pci_bus_speed to_pcie_link_speed(u16 lnksta)
{
	return pcie_link_speed[FIELD_GET(PCI_EXP_LNKSTA_CLS, lnksta)];
}

int pcie_link_speed_mbps(struct pci_dev *pdev)
{
	u16 lnksta;
	int err;

	err = pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnksta);
	if (err)
		return err;

	return pcie_dev_speed_mbps(to_pcie_link_speed(lnksta));
}
EXPORT_SYMBOL(pcie_link_speed_mbps);
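
/*
 * Editor's sketch (hypothetical caller): logging the trained link speed
 * at probe time makes links that trained below expectation visible in
 * dmesg. The threshold is a conservative editor-chosen guard: anything
 * below PCIe gen1 (2500 Mb/s) is treated as an error/unknown return.
 */
static void __maybe_unused example_log_link_speed(struct pci_dev *pdev)
{
	int mbps = pcie_link_speed_mbps(pdev);

	if (mbps < 2500)
		return;

	pci_info(pdev, "link running at %d Mb/s\n", mbps);
}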

/**
 * pcie_bandwidth_available - determine minimum link settings of a PCIe
 *			      device and its bandwidth limitation
 * @dev: PCI device to query
 * @limiting_dev: storage for device causing the bandwidth limitation
 * @speed: storage for speed of limiting device
 * @width: storage for width of limiting device
 *
 * Walk up the PCI device chain and find the point where the minimum
 * bandwidth is available. Return the bandwidth available there and (if
 * limiting_dev, speed, and width pointers are supplied) information about
 * that point. The bandwidth returned is in Mb/s, i.e., megabits/second of
 * raw bandwidth.
 */
u32 pcie_bandwidth_available(struct pci_dev *dev, struct pci_dev **limiting_dev,
			     enum pci_bus_speed *speed,
			     enum pcie_link_width *width)
{
	u16 lnksta;
	enum pci_bus_speed next_speed;
	enum pcie_link_width next_width;
	u32 bw, next_bw;

	if (speed)
		*speed = PCI_SPEED_UNKNOWN;
	if (width)
		*width = PCIE_LNK_WIDTH_UNKNOWN;

	bw = 0;

	while (dev) {
		pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);

		next_speed = to_pcie_link_speed(lnksta);
		next_width = FIELD_GET(PCI_EXP_LNKSTA_NLW, lnksta);

		next_bw = next_width * PCIE_SPEED2MBS_ENC(next_speed);

		/* Check if current device limits the total bandwidth */
		if (!bw || next_bw <= bw) {
			bw = next_bw;

			if (limiting_dev)
				*limiting_dev = dev;
			if (speed)
				*speed = next_speed;
			if (width)
				*width = next_width;
		}

		dev = pci_upstream_bridge(dev);
	}

	return bw;
}
EXPORT_SYMBOL(pcie_bandwidth_available);
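
/*
 * Editor's sketch (hypothetical caller): a bandwidth-hungry device can
 * warn when an upstream link, not its own, is the bottleneck. The call
 * pattern mirrors __pcie_print_link_status() below.
 */
static void __maybe_unused example_check_bandwidth(struct pci_dev *pdev)
{
	enum pcie_link_width width;
	enum pci_bus_speed speed;
	struct pci_dev *limit = NULL;
	u32 bw = pcie_bandwidth_available(pdev, &limit, &speed, &width);

	if (bw && limit && limit != pdev)
		pci_warn(pdev, "throughput limited to %u Mb/s by %s\n",
			 bw, pci_name(limit));
}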

/**
 * pcie_get_supported_speeds - query Supported Link Speed Vector
 * @dev: PCI device to query
 *
 * Query @dev supported link speeds.
 *
 * Implementation Note in PCIe r6.0 sec 7.5.3.18 recommends determining
 * supported link speeds using the Supported Link Speeds Vector in the Link
 * Capabilities 2 Register (when available).
 *
 * Link Capabilities 2 was added in PCIe r3.0, sec 7.8.18.
 *
 * Without Link Capabilities 2, i.e., prior to PCIe r3.0, Supported Link
 * Speeds field in Link Capabilities is used and only 2.5 GT/s and 5.0 GT/s
 * speeds were defined.
 *
 * For @dev without Supported Link Speed Vector, the field is synthesized
 * from the Max Link Speed field in the Link Capabilities Register.
 *
 * Return: Supported Link Speeds Vector (+ reserved 0 at LSB).
 */
u8 pcie_get_supported_speeds(struct pci_dev *dev)
{
	u32 lnkcap2, lnkcap;
	u8 speeds;

	/*
	 * Speeds retain the reserved 0 at LSB before PCIe Supported Link
	 * Speeds Vector to allow using SLS Vector bit defines directly.
	 */
	pcie_capability_read_dword(dev, PCI_EXP_LNKCAP2, &lnkcap2);
	speeds = lnkcap2 & PCI_EXP_LNKCAP2_SLS;

	/* Ignore speeds higher than Max Link Speed */
	pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
	speeds &= GENMASK(lnkcap & PCI_EXP_LNKCAP_SLS, 0);

	/* PCIe r3.0-compliant */
	if (speeds)
		return speeds;

	/* Synthesize from the Max Link Speed field */
	if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_5_0GB)
		speeds = PCI_EXP_LNKCAP2_SLS_5_0GB | PCI_EXP_LNKCAP2_SLS_2_5GB;
	else if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_2_5GB)
		speeds = PCI_EXP_LNKCAP2_SLS_2_5GB;

	return speeds;
}

/**
 * pcie_get_speed_cap - query for the PCI device's link speed capability
 * @dev: PCI device to query
 *
 * Query the PCI device speed capability.
 *
 * Return: the maximum link speed supported by the device.
 */
enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev)
{
	return PCIE_LNKCAP2_SLS2SPEED(dev->supported_speeds);
}
EXPORT_SYMBOL(pcie_get_speed_cap);

/**
 * pcie_get_width_cap - query for the PCI device's link width capability
 * @dev: PCI device to query
 *
 * Query the PCI device width capability. Return the maximum link width
 * supported by the device.
 */
enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev)
{
	u32 lnkcap;

	pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
	if (lnkcap)
		return FIELD_GET(PCI_EXP_LNKCAP_MLW, lnkcap);

	return PCIE_LNK_WIDTH_UNKNOWN;
}
EXPORT_SYMBOL(pcie_get_width_cap);

/**
 * pcie_bandwidth_capable - calculate a PCI device's link bandwidth capability
 * @dev: PCI device
 * @speed: storage for link speed
 * @width: storage for link width
 *
 * Calculate a PCI device's link bandwidth by querying for its link speed
 * and width, multiplying them, and applying encoding overhead. The result
 * is in Mb/s, i.e., megabits/second of raw bandwidth.
 */
static u32 pcie_bandwidth_capable(struct pci_dev *dev,
				  enum pci_bus_speed *speed,
				  enum pcie_link_width *width)
{
	*speed = pcie_get_speed_cap(dev);
	*width = pcie_get_width_cap(dev);

	if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN)
		return 0;

	return *width * PCIE_SPEED2MBS_ENC(*speed);
}

/**
 * __pcie_print_link_status - Report the PCI device's link speed and width
 * @dev: PCI device to query
 * @verbose: Print info even when enough bandwidth is available
 *
 * If the available bandwidth at the device is less than the device is
 * capable of, report the device's maximum possible bandwidth and the
 * upstream link that limits its performance. If @verbose, always print
 * the available bandwidth, even if the device isn't constrained.
 */
void __pcie_print_link_status(struct pci_dev *dev, bool verbose)
{
	enum pcie_link_width width, width_cap;
	enum pci_bus_speed speed, speed_cap;
	struct pci_dev *limiting_dev = NULL;
	u32 bw_avail, bw_cap;
	char *flit_mode = "";

	bw_cap = pcie_bandwidth_capable(dev, &speed_cap, &width_cap);
	bw_avail = pcie_bandwidth_available(dev, &limiting_dev, &speed, &width);

	if (dev->bus && dev->bus->flit_mode)
		flit_mode = ", in Flit mode";

	if (bw_avail >= bw_cap && verbose)
		pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth (%s x%d link)%s\n",
			 bw_cap / 1000, bw_cap % 1000,
			 pci_speed_string(speed_cap), width_cap, flit_mode);
	else if (bw_avail < bw_cap)
		pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth, limited by %s x%d link at %s (capable of %u.%03u Gb/s with %s x%d link)%s\n",
			 bw_avail / 1000, bw_avail % 1000,
			 pci_speed_string(speed), width,
			 limiting_dev ? pci_name(limiting_dev) : "<unknown>",
			 bw_cap / 1000, bw_cap % 1000,
			 pci_speed_string(speed_cap), width_cap, flit_mode);
}

/**
 * pcie_print_link_status - Report the PCI device's link speed and width
 * @dev: PCI device to query
 *
 * Report the available bandwidth at the device.
 */
void pcie_print_link_status(struct pci_dev *dev)
{
	__pcie_print_link_status(dev, true);
}
EXPORT_SYMBOL(pcie_print_link_status);
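
/*
 * Editor's sketch: high-throughput drivers commonly call
 * pcie_print_link_status() once from probe so undersized links are
 * recorded in the log. The probe fragment below is hypothetical.
 */
static int __maybe_unused example_probe(struct pci_dev *pdev)
{
	int rc = pci_enable_device(pdev);

	if (rc)
		return rc;

	pcie_print_link_status(pdev);
	return 0;
}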

/**
 * pci_select_bars - Make BAR mask from the type of resource
 * @dev: the PCI device for which BAR mask is made
 * @flags: resource type mask to be selected
 *
 * This helper routine makes a BAR mask from the given resource type.
 */
int pci_select_bars(struct pci_dev *dev, unsigned long flags)
{
	int i, bars = 0;

	for (i = 0; i < PCI_NUM_RESOURCES; i++)
		if (pci_resource_flags(dev, i) & flags)
			bars |= (1 << i);
	return bars;
}
EXPORT_SYMBOL(pci_select_bars);
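
/*
 * Editor's sketch (hypothetical driver name): pci_select_bars() pairs
 * naturally with pci_request_selected_regions() to claim only the BARs of
 * one resource type, here the memory BARs.
 */
static int __maybe_unused example_request_mem_bars(struct pci_dev *pdev)
{
	int bars = pci_select_bars(pdev, IORESOURCE_MEM);

	return pci_request_selected_regions(pdev, bars, "example-driver");
}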

/* Some architectures require additional programming to enable VGA */
static arch_set_vga_state_t arch_set_vga_state;

void __init pci_register_set_vga_state(arch_set_vga_state_t func)
{
	arch_set_vga_state = func;	/* NULL disables */
}

static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
				  unsigned int command_bits, u32 flags)
{
	if (arch_set_vga_state)
		return arch_set_vga_state(dev, decode, command_bits,
					  flags);
	return 0;
}

/**
 * pci_set_vga_state - set VGA decode state on device and parents if requested
 * @dev: the PCI device
 * @decode: true = enable decoding, false = disable decoding
 * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY
 * @flags: traverse ancestors and change bridges
 *	   CHANGE_BRIDGE_ONLY / CHANGE_BRIDGE
 */
int pci_set_vga_state(struct pci_dev *dev, bool decode,
		      unsigned int command_bits, u32 flags)
{
	struct pci_bus *bus;
	struct pci_dev *bridge;
	u16 cmd;
	int rc;

	WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) && (command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY)));

	/* ARCH specific VGA enables */
	rc = pci_set_vga_state_arch(dev, decode, command_bits, flags);
	if (rc)
		return rc;

	if (flags & PCI_VGA_STATE_CHANGE_DECODES) {
		pci_read_config_word(dev, PCI_COMMAND, &cmd);
		if (decode)
			cmd |= command_bits;
		else
			cmd &= ~command_bits;
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}

	if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE))
		return 0;

	bus = dev->bus;
	while (bus) {
		bridge = bus->self;
		if (bridge) {
			pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
					     &cmd);
			if (decode)
				cmd |= PCI_BRIDGE_CTL_VGA;
			else
				cmd &= ~PCI_BRIDGE_CTL_VGA;
			pci_write_config_word(bridge, PCI_BRIDGE_CONTROL,
					      cmd);
		}
		bus = bus->parent;
	}
	return 0;
}

#ifdef CONFIG_ACPI
bool pci_pr3_present(struct pci_dev *pdev)
{
	struct acpi_device *adev;

	if (acpi_disabled)
		return false;

	adev = ACPI_COMPANION(&pdev->dev);
	if (!adev)
		return false;

	return adev->power.flags.power_resources &&
		acpi_has_method(adev->handle, "_PR3");
}
EXPORT_SYMBOL_GPL(pci_pr3_present);
#endif

/**
 * pci_add_dma_alias - Add a DMA devfn alias for a device
 * @dev: the PCI device for which alias is added
 * @devfn_from: alias slot and function
 * @nr_devfns: number of subsequent devfns to alias
 *
 * This helper encodes an 8-bit devfn as a bit number in dma_alias_mask
 * which is used to program permissible bus-devfn source addresses for DMA
 * requests in an IOMMU. These aliases factor into IOMMU group creation
 * and are useful for devices generating DMA requests beyond or different
 * from their logical bus-devfn. Examples include device quirks where the
 * device simply uses the wrong devfn, as well as non-transparent bridges
 * where the alias may be a proxy for devices in another domain.
 *
 * IOMMU group creation is performed during device discovery or addition,
 * prior to any potential DMA mapping and therefore prior to driver probing
 * (especially for userspace assigned devices where IOMMU group definition
 * cannot be left as a userspace activity). DMA aliases should therefore
 * be configured via quirks, such as the PCI fixup header quirk.
 */
void pci_add_dma_alias(struct pci_dev *dev, u8 devfn_from,
		       unsigned int nr_devfns)
{
	int devfn_to;

	nr_devfns = min(nr_devfns, (unsigned int)MAX_NR_DEVFNS - devfn_from);
	devfn_to = devfn_from + nr_devfns - 1;

	if (!dev->dma_alias_mask)
		dev->dma_alias_mask = bitmap_zalloc(MAX_NR_DEVFNS, GFP_KERNEL);
	if (!dev->dma_alias_mask) {
		pci_warn(dev, "Unable to allocate DMA alias mask\n");
		return;
	}

	bitmap_set(dev->dma_alias_mask, devfn_from, nr_devfns);

	if (nr_devfns == 1)
		pci_info(dev, "Enabling fixed DMA alias to %02x.%d\n",
			 PCI_SLOT(devfn_from), PCI_FUNC(devfn_from));
	else if (nr_devfns > 1)
		pci_info(dev, "Enabling fixed DMA alias for devfn range from %02x.%d to %02x.%d\n",
			 PCI_SLOT(devfn_from), PCI_FUNC(devfn_from),
			 PCI_SLOT(devfn_to), PCI_FUNC(devfn_to));
}
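
/*
 * Editor's sketch: DMA aliases are normally registered from a header
 * fixup quirk so they exist before IOMMU groups are built. The devfn
 * choice and the commented-out vendor/device IDs are placeholders, not a
 * real quirk.
 */
static void __maybe_unused quirk_example_dma_alias(struct pci_dev *pdev)
{
	/* Device emits DMA as function 0 of its own slot */
	pci_add_dma_alias(pdev, PCI_DEVFN(PCI_SLOT(pdev->devfn), 0), 1);
}
/* DECLARE_PCI_FIXUP_HEADER(0x1234, 0x5678, quirk_example_dma_alias); */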

bool pci_devs_are_dma_aliases(struct pci_dev *dev1, struct pci_dev *dev2)
{
	return (dev1->dma_alias_mask &&
		test_bit(dev2->devfn, dev1->dma_alias_mask)) ||
	       (dev2->dma_alias_mask &&
		test_bit(dev1->devfn, dev2->dma_alias_mask)) ||
	       pci_real_dma_dev(dev1) == dev2 ||
	       pci_real_dma_dev(dev2) == dev1;
}

bool pci_device_is_present(struct pci_dev *pdev)
{
	u32 v;

	/* Check PF if pdev is a VF, since VF Vendor/Device IDs are 0xffff */
	pdev = pci_physfn(pdev);
	if (pci_dev_is_disconnected(pdev))
		return false;
	return pci_bus_read_dev_vendor_id(pdev->bus, pdev->devfn, &v, 0);
}
EXPORT_SYMBOL_GPL(pci_device_is_present);
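
/*
 * Editor's sketch (hypothetical teardown fragment): surprise-removal
 * paths often check presence first instead of timing out on reads that
 * return all-ones once the device is gone.
 */
static void __maybe_unused example_teardown(struct pci_dev *pdev)
{
	if (!pci_device_is_present(pdev)) {
		pci_dbg(pdev, "device already gone, skipping quiesce\n");
		return;
	}

	/* orderly hardware quiesce would go here */
}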

void pci_ignore_hotplug(struct pci_dev *dev)
{
	struct pci_dev *bridge = dev->bus->self;

	dev->ignore_hotplug = 1;
	/* Propagate the "ignore hotplug" setting to the parent bridge. */
	if (bridge)
		bridge->ignore_hotplug = 1;
}
EXPORT_SYMBOL_GPL(pci_ignore_hotplug);

/**
 * pci_real_dma_dev - Get PCI DMA device for PCI device
 * @dev: the PCI device that may have a PCI DMA alias
 *
 * Permits the platform to provide architecture-specific functionality to
 * devices needing to alias DMA to another PCI device on another PCI bus. If
 * the PCI device is on the same bus, it is recommended to use
 * pci_add_dma_alias(). This is the default implementation. Architecture
 * implementations can override this.
 */
struct pci_dev __weak *pci_real_dma_dev(struct pci_dev *dev)
{
	return dev;
}

resource_size_t __weak pcibios_default_alignment(void)
{
	return 0;
}

/*
 * Arches that don't want to expose struct resource to userland as-is in
 * sysfs and /proc can implement their own pci_resource_to_user().
 */
void __weak pci_resource_to_user(const struct pci_dev *dev, int bar,
				 const struct resource *rsrc,
				 resource_size_t *start, resource_size_t *end)
{
	*start = rsrc->start;
	*end = rsrc->end;
}

static char *resource_alignment_param;
static DEFINE_SPINLOCK(resource_alignment_lock);

/**
 * pci_specified_resource_alignment - get resource alignment specified by user.
 * @dev: the PCI device to get
 * @resize: whether or not to change resources' size when reassigning alignment
 *
 * RETURNS: Resource alignment if it is specified.
 *          Zero if it is not specified.
 */
static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev,
							bool *resize)
{
	int align_order, count;
	resource_size_t align = pcibios_default_alignment();
	const char *p;
	int ret;

	spin_lock(&resource_alignment_lock);
	p = resource_alignment_param;
	if (!p || !*p)
		goto out;
	if (pci_has_flag(PCI_PROBE_ONLY)) {
		align = 0;
		pr_info_once("PCI: Ignoring requested alignments (PCI_PROBE_ONLY)\n");
		goto out;
	}

	while (*p) {
		count = 0;
		if (sscanf(p, "%d%n", &align_order, &count) == 1 &&
		    p[count] == '@') {
			p += count + 1;
			if (align_order > 63) {
				pr_err("PCI: Invalid requested alignment (order %d)\n",
				       align_order);
				align_order = PAGE_SHIFT;
			}
		} else {
			align_order = PAGE_SHIFT;
		}

		ret = pci_dev_str_match(dev, p, &p);
		if (ret == 1) {
			*resize = true;
			align = 1ULL << align_order;
			break;
		} else if (ret < 0) {
			pr_err("PCI: Can't parse resource_alignment parameter: %s\n",
			       p);
			break;
		}

		if (*p != ';' && *p != ',') {
			/* End of param or invalid format */
			break;
		}
		p++;
	}
out:
	spin_unlock(&resource_alignment_lock);
	return align;
}

static void pci_request_resource_alignment(struct pci_dev *dev, int bar,
					   resource_size_t align, bool resize)
{
	struct resource *r = &dev->resource[bar];
	const char *r_name = pci_resource_name(dev, bar);
	resource_size_t size;

	if (!(r->flags & IORESOURCE_MEM))
		return;

	if (r->flags & IORESOURCE_PCI_FIXED) {
		pci_info(dev, "%s %pR: ignoring requested alignment %#llx\n",
			 r_name, r, (unsigned long long)align);
		return;
	}

	size = resource_size(r);
	if (size >= align)
		return;

	/*
	 * Increase the alignment of the resource. There are two ways we
	 * can do this:
	 *
	 * 1) Increase the size of the resource. BARs are aligned on their
	 *    size, so when we reallocate space for this resource, we'll
	 *    allocate it with the larger alignment. This also prevents
	 *    assignment of any other BARs inside the alignment region, so
	 *    if we're requesting page alignment, this means no other BARs
	 *    will share the page.
	 *
	 *    The disadvantage is that this makes the resource larger than
	 *    the hardware BAR, which may break drivers that compute things
	 *    based on the resource size, e.g., to find registers at a
	 *    fixed offset before the end of the BAR.
	 *
	 * 2) Retain the resource size, but use IORESOURCE_STARTALIGN and
	 *    set r->start to the desired alignment. By itself this
	 *    doesn't prevent other BARs being put inside the alignment
	 *    region, but if we realign *every* resource of every device in
	 *    the system, none of them will share an alignment region.
	 *
	 * When the user has requested alignment for only some devices via
	 * the "pci=resource_alignment" argument, "resize" is true and we
	 * use the first method. Otherwise we assume we're aligning all
	 * devices and we use the second.
	 */

	pci_info(dev, "%s %pR: requesting alignment to %#llx\n",
		 r_name, r, (unsigned long long)align);

	if (resize) {
		r->start = 0;
		r->end = align - 1;
	} else {
		r->flags &= ~IORESOURCE_SIZEALIGN;
		r->flags |= IORESOURCE_STARTALIGN;
		resource_set_range(r, align, size);
	}
	r->flags |= IORESOURCE_UNSET;
}

/*
 * This function disables memory decoding and releases memory resources
 * of the device specified by kernel's boot parameter 'pci=resource_alignment='.
 * It also rounds up size to specified alignment.
 * Later on, the kernel will assign page-aligned memory resource back
 * to the device.
 */
void pci_reassigndev_resource_alignment(struct pci_dev *dev)
{
	int i;
	struct resource *r;
	resource_size_t align;
	u16 command;
	bool resize = false;

	/*
	 * VF BARs are read-only zero according to SR-IOV spec r1.1, sec
	 * 3.4.1.11. Their resources are allocated from the space
	 * described by the VF BARx register in the PF's SR-IOV capability.
	 * We can't influence their alignment here.
	 */
	if (dev->is_virtfn)
		return;

	/* check if specified PCI is target device to reassign */
	align = pci_specified_resource_alignment(dev, &resize);
	if (!align)
		return;

	if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
	    (dev->class >> 8) == PCI_CLASS_BRIDGE_HOST) {
		pci_warn(dev, "Can't reassign resources to host bridge\n");
		return;
	}

	pci_read_config_word(dev, PCI_COMMAND, &command);
	command &= ~PCI_COMMAND_MEMORY;
	pci_write_config_word(dev, PCI_COMMAND, command);

	for (i = 0; i <= PCI_ROM_RESOURCE; i++)
		pci_request_resource_alignment(dev, i, align, resize);

	/*
	 * Need to disable bridge's resource window,
	 * to enable the kernel to reassign new resource
	 * window later on.
	 */
	if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
		for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
			r = &dev->resource[i];
			if (!(r->flags & IORESOURCE_MEM))
				continue;
			r->flags |= IORESOURCE_UNSET;
			r->end = resource_size(r) - 1;
			r->start = 0;
		}
		pci_disable_bridge_window(dev);
	}
}

static ssize_t resource_alignment_show(const struct bus_type *bus, char *buf)
{
	size_t count = 0;

	spin_lock(&resource_alignment_lock);
	if (resource_alignment_param)
		count = sysfs_emit(buf, "%s\n", resource_alignment_param);
	spin_unlock(&resource_alignment_lock);

	return count;
}

static ssize_t resource_alignment_store(const struct bus_type *bus,
					const char *buf, size_t count)
{
	char *param, *old, *end;

	if (count >= (PAGE_SIZE - 1))
		return -EINVAL;

	param = kstrndup(buf, count, GFP_KERNEL);
	if (!param)
		return -ENOMEM;

	end = strchr(param, '\n');
	if (end)
		*end = '\0';

	spin_lock(&resource_alignment_lock);
	old = resource_alignment_param;
	if (strlen(param)) {
		resource_alignment_param = param;
	} else {
		kfree(param);
		resource_alignment_param = NULL;
	}
	spin_unlock(&resource_alignment_lock);

	kfree(old);

	return count;
}

static BUS_ATTR_RW(resource_alignment);

static int __init pci_resource_alignment_sysfs_init(void)
{
	return bus_create_file(&pci_bus_type,
			       &bus_attr_resource_alignment);
}
late_initcall(pci_resource_alignment_sysfs_init);

static void pci_no_domains(void)
{
#ifdef CONFIG_PCI_DOMAINS
	pci_domains_supported = 0;
#endif
}

#ifdef CONFIG_PCI_DOMAINS_GENERIC
static DEFINE_IDA(pci_domain_nr_static_ida);
static DEFINE_IDA(pci_domain_nr_dynamic_ida);

static void of_pci_reserve_static_domain_nr(void)
{
	struct device_node *np;
	int domain_nr;

	for_each_node_by_type(np, "pci") {
		domain_nr = of_get_pci_domain_nr(np);
		if (domain_nr < 0)
			continue;
		/*
		 * Permanently allocate domain_nr in dynamic_ida
		 * to prevent it from dynamic allocation.
		 */
		ida_alloc_range(&pci_domain_nr_dynamic_ida,
				domain_nr, domain_nr, GFP_KERNEL);
	}
}

static int of_pci_bus_find_domain_nr(struct device *parent)
{
	static bool static_domains_reserved = false;
	int domain_nr;

	/* On the first call scan device tree for static allocations. */
	if (!static_domains_reserved) {
		of_pci_reserve_static_domain_nr();
		static_domains_reserved = true;
	}

	if (parent) {
		/*
		 * If domain is in DT, allocate it in static IDA. This
		 * prevents duplicate static allocations in case of errors
		 * in DT.
		 */
		domain_nr = of_get_pci_domain_nr(parent->of_node);
		if (domain_nr >= 0)
			return ida_alloc_range(&pci_domain_nr_static_ida,
					       domain_nr, domain_nr,
					       GFP_KERNEL);
	}

	/*
	 * If domain was not specified in DT, choose a free ID from dynamic
	 * allocations. All domain numbers from DT are permanently in
	 * dynamic allocations to prevent assigning them to other DT nodes
	 * without static domain.
	 */
	return ida_alloc(&pci_domain_nr_dynamic_ida, GFP_KERNEL);
}

static void of_pci_bus_release_domain_nr(struct device *parent, int domain_nr)
{
	if (domain_nr < 0)
		return;

	/* Release domain from IDA where it was allocated. */
	if (of_get_pci_domain_nr(parent->of_node) == domain_nr)
		ida_free(&pci_domain_nr_static_ida, domain_nr);
	else
		ida_free(&pci_domain_nr_dynamic_ida, domain_nr);
}

int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent)
{
	return acpi_disabled ? of_pci_bus_find_domain_nr(parent) :
			       acpi_pci_bus_find_domain_nr(bus);
}

void pci_bus_release_domain_nr(struct device *parent, int domain_nr)
{
	if (!acpi_disabled)
		return;
	of_pci_bus_release_domain_nr(parent, domain_nr);
}
#endif

/**
 * pci_ext_cfg_avail - can we access extended PCI config space?
 *
 * Returns 1 if we can access PCI extended config space (offsets
 * greater than 0xff). This is the default implementation. Architecture
 * implementations can override this.
 */
int __weak pci_ext_cfg_avail(void)
{
	return 1;
}

static int __init pci_setup(char *str)
{
	while (str) {
		char *k = strchr(str, ',');
		if (k)
			*k++ = 0;
		if (*str && (str = pcibios_setup(str)) && *str) {
			if (!strcmp(str, "nomsi")) {
				pci_no_msi();
			} else if (!strncmp(str, "noats", 5)) {
				pr_info("PCIe: ATS is disabled\n");
				pcie_ats_disabled = true;
			} else if (!strcmp(str, "noaer")) {
				pci_no_aer();
			} else if (!strcmp(str, "earlydump")) {
				pci_early_dump = true;
			} else if (!strncmp(str, "realloc=", 8)) {
				pci_realloc_get_opt(str + 8);
			} else if (!strncmp(str, "realloc", 7)) {
				pci_realloc_get_opt("on");
			} else if (!strcmp(str, "nodomains")) {
				pci_no_domains();
			} else if (!strncmp(str, "noari", 5)) {
				pcie_ari_disabled = true;
			} else if (!strncmp(str, "notph", 5)) {
				pci_no_tph();
			} else if (!strncmp(str, "cbiosize=", 9)) {
				pci_cardbus_io_size = memparse(str + 9, &str);
			} else if (!strncmp(str, "cbmemsize=", 10)) {
				pci_cardbus_mem_size = memparse(str + 10, &str);
			} else if (!strncmp(str, "resource_alignment=", 19)) {
				resource_alignment_param = str + 19;
			} else if (!strncmp(str, "ecrc=", 5)) {
				pcie_ecrc_get_policy(str + 5);
			} else if (!strncmp(str, "hpiosize=", 9)) {
				pci_hotplug_io_size = memparse(str + 9, &str);
			} else if (!strncmp(str, "hpmmiosize=", 11)) {
				pci_hotplug_mmio_size = memparse(str + 11, &str);
			} else if (!strncmp(str, "hpmmioprefsize=", 15)) {
				pci_hotplug_mmio_pref_size = memparse(str + 15, &str);
			} else if (!strncmp(str, "hpmemsize=", 10)) {
				pci_hotplug_mmio_size = memparse(str + 10, &str);
				pci_hotplug_mmio_pref_size = pci_hotplug_mmio_size;
			} else if (!strncmp(str, "hpbussize=", 10)) {
				pci_hotplug_bus_size =
					simple_strtoul(str + 10, &str, 0);
				if (pci_hotplug_bus_size > 0xff)
					pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE;
			} else if (!strncmp(str, "pcie_bus_tune_off", 17)) {
				pcie_bus_config = PCIE_BUS_TUNE_OFF;
			} else if (!strncmp(str, "pcie_bus_safe", 13)) {
				pcie_bus_config = PCIE_BUS_SAFE;
			} else if (!strncmp(str, "pcie_bus_perf", 13)) {
				pcie_bus_config = PCIE_BUS_PERFORMANCE;
			} else if (!strncmp(str, "pcie_bus_peer2peer", 18)) {
				pcie_bus_config = PCIE_BUS_PEER2PEER;
			} else if (!strncmp(str, "pcie_scan_all", 13)) {
				pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
			} else if (!strncmp(str, "disable_acs_redir=", 18)) {
				disable_acs_redir_param = str + 18;
			} else if (!strncmp(str, "config_acs=", 11)) {
				config_acs_param = str + 11;
			} else {
				pr_err("PCI: Unknown option `%s'\n", str);
			}
		}
		str = k;
	}
	return 0;
}
early_param("pci", pci_setup);
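
/*
 * Editor's note: illustrative command lines accepted by the parser above
 * (values are examples only):
 *
 *   pci=nomsi,noaer
 *   pci=hpmemsize=64M,hpbussize=8
 *   pci=resource_alignment=12@pci:8086:9c35
 *   pci=pcie_bus_perf,realloc=on
 */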

/*
 * 'resource_alignment_param' and 'disable_acs_redir_param' are initialized
 * in pci_setup(), above, to point to data in the __initdata section which
 * will be freed after the init sequence is complete. We can't allocate memory
 * in pci_setup() because some architectures do not have any memory allocation
 * service available during an early_param() call. So we allocate memory and
 * copy the variable here before the init section is freed.
 */
static int __init pci_realloc_setup_params(void)
{
	resource_alignment_param = kstrdup(resource_alignment_param,
					   GFP_KERNEL);
	disable_acs_redir_param = kstrdup(disable_acs_redir_param, GFP_KERNEL);
	config_acs_param = kstrdup(config_acs_param, GFP_KERNEL);

	return 0;
}
pure_initcall(pci_realloc_setup_params);