// SPDX-License-Identifier: GPL-2.0
/*
 * PCI Bus Services, see include/linux/pci.h for further explanation.
 *
 * Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
 * David Mosberger-Tang
 *
 * Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz>
 */

#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/dmi.h>
#include <linux/init.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/log2.h>
#include <linux/logic_pio.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <linux/pci_hotplug.h>
#include <linux/vmalloc.h>
#include <asm/dma.h>
#include <linux/aer.h>
#include <linux/bitfield.h>
#include "pci.h"

DEFINE_MUTEX(pci_slot_mutex);

const char *pci_power_names[] = {
	"error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown",
};
EXPORT_SYMBOL_GPL(pci_power_names);

#ifdef CONFIG_X86_32
int isa_dma_bridge_buggy;
EXPORT_SYMBOL(isa_dma_bridge_buggy);
#endif

int pci_pci_problems;
EXPORT_SYMBOL(pci_pci_problems);

unsigned int pci_pm_d3hot_delay;

static void pci_pme_list_scan(struct work_struct *work);

static LIST_HEAD(pci_pme_list);
static DEFINE_MUTEX(pci_pme_list_mutex);
static DECLARE_DELAYED_WORK(pci_pme_work, pci_pme_list_scan);

struct pci_pme_device {
	struct list_head list;
	struct pci_dev *dev;
};

#define PME_TIMEOUT 1000	/* How long between PME checks */

/*
 * Following exit from Conventional Reset, devices must be ready within 1 sec
 * (PCIe r6.0 sec 6.6.1).  A D3cold to D0 transition implies a Conventional
 * Reset (PCIe r6.0 sec 5.8).
 */
#define PCI_RESET_WAIT 1000	/* msec */

/*
 * Devices may extend the 1 sec period through Request Retry Status
 * completions (PCIe r6.0 sec 2.3.1).  The spec does not provide an upper
 * limit, but 60 sec ought to be enough for any device to become
 * responsive.
 */
#define PCIE_RESET_READY_POLL_MS 60000	/* msec */

static void pci_dev_d3_sleep(struct pci_dev *dev)
{
	unsigned int delay_ms = max(dev->d3hot_delay, pci_pm_d3hot_delay);
	unsigned int upper;

	if (delay_ms) {
		/* Use a 20% upper bound, 1ms minimum */
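		/* e.g. (illustrative) delay_ms = 10 -> usleep_range(10000, 12000) */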
		upper = max(DIV_ROUND_CLOSEST(delay_ms, 5), 1U);
		usleep_range(delay_ms * USEC_PER_MSEC,
			     (delay_ms + upper) * USEC_PER_MSEC);
	}
}

bool pci_reset_supported(struct pci_dev *dev)
{
	return dev->reset_methods[0] != 0;
}

#ifdef CONFIG_PCI_DOMAINS
int pci_domains_supported = 1;
#endif

#define DEFAULT_CARDBUS_IO_SIZE		(256)
#define DEFAULT_CARDBUS_MEM_SIZE	(64*1024*1024)
/* pci=cbmemsize=nnM,cbiosize=nn can override this */
unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE;
unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;

#define DEFAULT_HOTPLUG_IO_SIZE		(256)
#define DEFAULT_HOTPLUG_MMIO_SIZE	(2*1024*1024)
#define DEFAULT_HOTPLUG_MMIO_PREF_SIZE	(2*1024*1024)
/* hpiosize=nn can override this */
unsigned long pci_hotplug_io_size  = DEFAULT_HOTPLUG_IO_SIZE;
/*
 * pci=hpmmiosize=nnM overrides non-prefetchable MMIO size,
 * pci=hpmmioprefsize=nnM overrides prefetchable MMIO size;
 * pci=hpmemsize=nnM overrides both
 */
unsigned long pci_hotplug_mmio_size = DEFAULT_HOTPLUG_MMIO_SIZE;
unsigned long pci_hotplug_mmio_pref_size = DEFAULT_HOTPLUG_MMIO_PREF_SIZE;

#define DEFAULT_HOTPLUG_BUS_SIZE	1
unsigned long pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE;


/* PCIe MPS/MRRS strategy; can be overridden by kernel command-line param */
#ifdef CONFIG_PCIE_BUS_TUNE_OFF
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_TUNE_OFF;
#elif defined CONFIG_PCIE_BUS_SAFE
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_SAFE;
#elif defined CONFIG_PCIE_BUS_PERFORMANCE
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_PERFORMANCE;
#elif defined CONFIG_PCIE_BUS_PEER2PEER
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_PEER2PEER;
#else
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_DEFAULT;
#endif

/*
 * The default CLS is used if arch didn't set CLS explicitly and not
 * all pci devices agree on the same value.  Arch can override either
 * the dfl or actual value as it sees fit.  Don't forget this is
 * measured in 32-bit words, not bytes.
 */
u8 pci_dfl_cache_line_size __ro_after_init = L1_CACHE_BYTES >> 2;
u8 pci_cache_line_size __ro_after_init;

/*
 * If we set up a device for bus mastering, we need to check the latency
 * timer as certain BIOSes forget to set it properly.
 */
unsigned int pcibios_max_latency = 255;

/* If set, the PCIe ARI capability will not be used. */
static bool pcie_ari_disabled;

/* If set, the PCIe ATS capability will not be used. */
static bool pcie_ats_disabled;

/* If set, the PCI config space of each device is printed during boot. */
bool pci_early_dump;

bool pci_ats_disabled(void)
{
	return pcie_ats_disabled;
}
EXPORT_SYMBOL_GPL(pci_ats_disabled);

/* Disable bridge_d3 for all PCIe ports */
static bool pci_bridge_d3_disable;
/* Force bridge_d3 for all PCIe ports */
static bool pci_bridge_d3_force;

static int __init pcie_port_pm_setup(char *str)
{
	if (!strcmp(str, "off"))
		pci_bridge_d3_disable = true;
	else if (!strcmp(str, "force"))
		pci_bridge_d3_force = true;
	return 1;
}
__setup("pcie_port_pm=", pcie_port_pm_setup);

/**
 * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
 * @bus: pointer to PCI bus structure to search
 *
 * Given a PCI bus, returns the highest PCI bus number present in the set
 * including the given PCI bus and its list of child PCI buses.
 */
unsigned char pci_bus_max_busnr(struct pci_bus *bus)
{
	struct pci_bus *tmp;
	unsigned char max, n;

	max = bus->busn_res.end;
	list_for_each_entry(tmp, &bus->children, node) {
		n = pci_bus_max_busnr(tmp);
		if (n > max)
			max = n;
	}
	return max;
}
EXPORT_SYMBOL_GPL(pci_bus_max_busnr);

/**
 * pci_status_get_and_clear_errors - return and clear error bits in PCI_STATUS
 * @pdev: the PCI device
 *
 * Returns error bits set in PCI_STATUS and clears them.
 */
int pci_status_get_and_clear_errors(struct pci_dev *pdev)
{
	u16 status;
	int ret;

	ret = pci_read_config_word(pdev, PCI_STATUS, &status);
	if (ret != PCIBIOS_SUCCESSFUL)
		return -EIO;

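	/* The PCI_STATUS error bits are RW1C: writing the set bits back clears them */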
	status &= PCI_STATUS_ERROR_BITS;
	if (status)
		pci_write_config_word(pdev, PCI_STATUS, status);

	return status;
}
EXPORT_SYMBOL_GPL(pci_status_get_and_clear_errors);

#ifdef CONFIG_HAS_IOMEM
static void __iomem *__pci_ioremap_resource(struct pci_dev *pdev, int bar,
					    bool write_combine)
{
	struct resource *res = &pdev->resource[bar];
	resource_size_t start = res->start;
	resource_size_t size = resource_size(res);

	/*
	 * Make sure the BAR is actually a memory resource, not an IO resource
	 */
	if (res->flags & IORESOURCE_UNSET || !(res->flags & IORESOURCE_MEM)) {
		pci_err(pdev, "can't ioremap BAR %d: %pR\n", bar, res);
		return NULL;
	}

	if (write_combine)
		return ioremap_wc(start, size);

	return ioremap(start, size);
}

void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
{
	return __pci_ioremap_resource(pdev, bar, false);
}
EXPORT_SYMBOL_GPL(pci_ioremap_bar);

void __iomem *pci_ioremap_wc_bar(struct pci_dev *pdev, int bar)
{
	return __pci_ioremap_resource(pdev, bar, true);
}
EXPORT_SYMBOL_GPL(pci_ioremap_wc_bar);
#endif

/**
 * pci_dev_str_match_path - test if a path string matches a device
 * @dev: the PCI device to test
 * @path: string to match the device against
 * @endptr: pointer to the string after the match
 *
 * Test if a string (typically from a kernel parameter) formatted as a
 * path of device/function addresses matches a PCI device. The string must
 * be of the form:
 *
 *   [<domain>:]<bus>:<device>.<func>[/<device>.<func>]*
 *
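 * For example (illustrative), "0000:00:0e.0/3.0" matches function 0 of
 * device 3 below the bridge at domain 0000, bus 00, devfn 0e.0.
 *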
 * A path for a device can be obtained using 'lspci -t'.  Using a path
 * is more robust against bus renumbering than using only a single bus,
 * device and function address.
 *
 * Returns 1 if the string matches the device, 0 if it does not and
 * a negative error code if it fails to parse the string.
 */
static int pci_dev_str_match_path(struct pci_dev *dev, const char *path,
				  const char **endptr)
{
	int ret;
	unsigned int seg, bus, slot, func;
	char *wpath, *p;
	char end;

	*endptr = strchrnul(path, ';');

	wpath = kmemdup_nul(path, *endptr - path, GFP_ATOMIC);
	if (!wpath)
		return -ENOMEM;

	while (1) {
		p = strrchr(wpath, '/');
		if (!p)
			break;
		ret = sscanf(p, "/%x.%x%c", &slot, &func, &end);
		if (ret != 2) {
			ret = -EINVAL;
			goto free_and_exit;
		}

		if (dev->devfn != PCI_DEVFN(slot, func)) {
			ret = 0;
			goto free_and_exit;
		}

		/*
		 * Note: we don't need to get a reference to the upstream
		 * bridge because we hold a reference to the top level
		 * device which should hold a reference to the bridge,
		 * and so on.
		 */
		dev = pci_upstream_bridge(dev);
		if (!dev) {
			ret = 0;
			goto free_and_exit;
		}

		*p = 0;
	}

	ret = sscanf(wpath, "%x:%x:%x.%x%c", &seg, &bus, &slot,
		     &func, &end);
	if (ret != 4) {
		seg = 0;
		ret = sscanf(wpath, "%x:%x.%x%c", &bus, &slot, &func, &end);
		if (ret != 3) {
			ret = -EINVAL;
			goto free_and_exit;
		}
	}

	ret = (seg == pci_domain_nr(dev->bus) &&
	       bus == dev->bus->number &&
	       dev->devfn == PCI_DEVFN(slot, func));

free_and_exit:
	kfree(wpath);
	return ret;
}

/**
 * pci_dev_str_match - test if a string matches a device
 * @dev: the PCI device to test
 * @p: string to match the device against
 * @endptr: pointer to the string after the match
 *
 * Test if a string (typically from a kernel parameter) matches a specified
 * PCI device. The string may be of one of the following formats:
 *
 *   [<domain>:]<bus>:<device>.<func>[/<device>.<func>]*
 *   pci:<vendor>:<device>[:<subvendor>:<subdevice>]
 *
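 * For example (illustrative), "pci:8086:1533" matches any device with
 * Vendor ID 0x8086 and Device ID 0x1533.
 *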
 * The first format specifies a PCI bus/device/function address which
 * may change if new hardware is inserted, if motherboard firmware changes,
 * or due to changes caused by kernel parameters. If the domain is
 * left unspecified, it is taken to be 0.  In order to be robust against
 * bus renumbering issues, a path of PCI device/function numbers may be used
 * to address the specific device.  The path for a device can be determined
 * through the use of 'lspci -t'.
 *
 * The second format matches devices using IDs in the configuration
 * space which may match multiple devices in the system. A value of 0
 * for any field will match all devices. (Note: this differs from
 * in-kernel code that uses PCI_ANY_ID which is ~0; this is for
 * legacy reasons and convenience so users don't have to specify
 * FFFFFFFFs on the command line.)
 *
 * Returns 1 if the string matches the device, 0 if it does not and
 * a negative error code if the string cannot be parsed.
 */
static int pci_dev_str_match(struct pci_dev *dev, const char *p,
			     const char **endptr)
{
	int ret;
	int count;
	unsigned short vendor, device, subsystem_vendor, subsystem_device;

	if (strncmp(p, "pci:", 4) == 0) {
		/* PCI vendor/device (subvendor/subdevice) IDs are specified */
		p += 4;
		ret = sscanf(p, "%hx:%hx:%hx:%hx%n", &vendor, &device,
			     &subsystem_vendor, &subsystem_device, &count);
		if (ret != 4) {
			ret = sscanf(p, "%hx:%hx%n", &vendor, &device, &count);
			if (ret != 2)
				return -EINVAL;

			subsystem_vendor = 0;
			subsystem_device = 0;
		}

		p += count;

		if ((!vendor || vendor == dev->vendor) &&
		    (!device || device == dev->device) &&
		    (!subsystem_vendor ||
			    subsystem_vendor == dev->subsystem_vendor) &&
		    (!subsystem_device ||
			    subsystem_device == dev->subsystem_device))
			goto found;
	} else {
		/*
		 * PCI Bus, Device, Function IDs are specified
		 * (optionally, may include a path of devfns following it)
		 */
		ret = pci_dev_str_match_path(dev, p, &p);
		if (ret < 0)
			return ret;
		else if (ret)
			goto found;
	}

	*endptr = p;
	return 0;

found:
	*endptr = p;
	return 1;
}

static u8 __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn,
			      u8 pos, int cap)
{
	return PCI_FIND_NEXT_CAP(pci_bus_read_config, pos, cap, bus, devfn);
}

u8 pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
{
	return __pci_find_next_cap(dev->bus, dev->devfn,
				   pos + PCI_CAP_LIST_NEXT, cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_capability);

static u8 __pci_bus_find_cap_start(struct pci_bus *bus,
				   unsigned int devfn, u8 hdr_type)
{
	u16 status;

	pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
	if (!(status & PCI_STATUS_CAP_LIST))
		return 0;

	switch (hdr_type) {
	case PCI_HEADER_TYPE_NORMAL:
	case PCI_HEADER_TYPE_BRIDGE:
		return PCI_CAPABILITY_LIST;
	case PCI_HEADER_TYPE_CARDBUS:
		return PCI_CB_CAPABILITY_LIST;
	}

	return 0;
}

/**
 * pci_find_capability - query for devices' capabilities
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Tell if a device supports a given PCI capability.
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.  Possible values for @cap include:
 *
 *  %PCI_CAP_ID_PM           Power Management
 *  %PCI_CAP_ID_AGP          Accelerated Graphics Port
 *  %PCI_CAP_ID_VPD          Vital Product Data
 *  %PCI_CAP_ID_SLOTID       Slot Identification
 *  %PCI_CAP_ID_MSI          Message Signalled Interrupts
 *  %PCI_CAP_ID_CHSWP        CompactPCI HotSwap
 *  %PCI_CAP_ID_PCIX         PCI-X
 *  %PCI_CAP_ID_EXP          PCI Express
 */
u8 pci_find_capability(struct pci_dev *dev, int cap)
{
	u8 pos;

	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
	if (pos)
		pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap);

	return pos;
}
EXPORT_SYMBOL(pci_find_capability);

/**
 * pci_bus_find_capability - query for devices' capabilities
 * @bus: the PCI bus to query
 * @devfn: PCI device to query
 * @cap: capability code
 *
 * Like pci_find_capability() but works for PCI devices that do not have a
 * pci_dev structure set up yet.
 *
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.
 */
u8 pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
{
	u8 hdr_type, pos;

	pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);

	pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & PCI_HEADER_TYPE_MASK);
	if (pos)
		pos = __pci_find_next_cap(bus, devfn, pos, cap);

	return pos;
}
EXPORT_SYMBOL(pci_bus_find_capability);

/**
 * pci_find_next_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @start: address at which to start looking (0 to start at beginning of list)
 * @cap: capability code
 *
 * Returns the address of the next matching extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it.  Some capabilities can occur several times, e.g., the
 * vendor-specific capability, and this provides a way to find them all.
 */
u16 pci_find_next_ext_capability(struct pci_dev *dev, u16 start, int cap)
{
	if (dev->cfg_size <= PCI_CFG_SPACE_SIZE)
		return 0;

	return PCI_FIND_NEXT_EXT_CAP(pci_bus_read_config, start, cap,
				     dev->bus, dev->devfn);
}
EXPORT_SYMBOL_GPL(pci_find_next_ext_capability);

/**
 * pci_find_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Returns the address of the requested extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it.  Possible values for @cap include:
 *
 *  %PCI_EXT_CAP_ID_ERR  Advanced Error Reporting
 *  %PCI_EXT_CAP_ID_VC   Virtual Channel
 *  %PCI_EXT_CAP_ID_DSN  Device Serial Number
 *  %PCI_EXT_CAP_ID_PWR  Power Budgeting
 */
u16 pci_find_ext_capability(struct pci_dev *dev, int cap)
{
	return pci_find_next_ext_capability(dev, 0, cap);
}
EXPORT_SYMBOL_GPL(pci_find_ext_capability);

/**
 * pci_get_dsn - Read and return the 8-byte Device Serial Number
 * @dev: PCI device to query
 *
 * Looks up the PCI_EXT_CAP_ID_DSN and reads the 8 bytes of the Device Serial
 * Number.
 *
 * Returns the DSN, or zero if the capability does not exist.
 */
u64 pci_get_dsn(struct pci_dev *dev)
{
	u32 dword;
	u64 dsn;
	int pos;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DSN);
	if (!pos)
		return 0;

	/*
	 * The Device Serial Number is two dwords offset 4 bytes from the
	 * capability position. The specification says that the first dword is
	 * the lower half, and the second dword is the upper half.
	 */
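	/* e.g. (illustrative) lower 0x11223344, upper 0x55667788 -> DSN 0x5566778811223344 */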
	pos += 4;
	pci_read_config_dword(dev, pos, &dword);
	dsn = (u64)dword;
	pci_read_config_dword(dev, pos + 4, &dword);
	dsn |= ((u64)dword) << 32;

	return dsn;
}
EXPORT_SYMBOL_GPL(pci_get_dsn);

static u8 __pci_find_next_ht_cap(struct pci_dev *dev, u8 pos, int ht_cap)
{
	int rc;
	u8 cap, mask;

	if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST)
		mask = HT_3BIT_CAP_MASK;
	else
		mask = HT_5BIT_CAP_MASK;

	pos = PCI_FIND_NEXT_CAP(pci_bus_read_config, pos,
				PCI_CAP_ID_HT, dev->bus, dev->devfn);
	while (pos) {
		rc = pci_read_config_byte(dev, pos + 3, &cap);
		if (rc != PCIBIOS_SUCCESSFUL)
			return 0;

		if ((cap & mask) == ht_cap)
			return pos;

		pos = PCI_FIND_NEXT_CAP(pci_bus_read_config,
					pos + PCI_CAP_LIST_NEXT,
					PCI_CAP_ID_HT, dev->bus,
					dev->devfn);
	}

	return 0;
}

/**
 * pci_find_next_ht_capability - query a device's HyperTransport capabilities
 * @dev: PCI device to query
 * @pos: Position from which to continue searching
 * @ht_cap: HyperTransport capability code
 *
 * To be used in conjunction with pci_find_ht_capability() to search for
 * all capabilities matching @ht_cap. @pos should always be a value returned
 * from pci_find_ht_capability().
 *
 * NB. To be 100% safe against broken PCI devices, the caller should take
 * steps to avoid an infinite loop.
 */
u8 pci_find_next_ht_capability(struct pci_dev *dev, u8 pos, int ht_cap)
{
	return __pci_find_next_ht_cap(dev, pos + PCI_CAP_LIST_NEXT, ht_cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_ht_capability);

/**
 * pci_find_ht_capability - query a device's HyperTransport capabilities
 * @dev: PCI device to query
 * @ht_cap: HyperTransport capability code
 *
 * Tell if a device supports a given HyperTransport capability.
 * Returns an address within the device's PCI configuration space
 * or 0 in case the device does not support the requested capability.
 * The address points to the PCI capability, of type PCI_CAP_ID_HT,
 * which has a HyperTransport capability matching @ht_cap.
 */
u8 pci_find_ht_capability(struct pci_dev *dev, int ht_cap)
{
	u8 pos;

	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
	if (pos)
		pos = __pci_find_next_ht_cap(dev, pos, ht_cap);

	return pos;
}
EXPORT_SYMBOL_GPL(pci_find_ht_capability);

/**
 * pci_find_vsec_capability - Find a vendor-specific extended capability
 * @dev: PCI device to query
 * @vendor: Vendor ID for which capability is defined
 * @cap: Vendor-specific capability ID
 *
 * If @dev has Vendor ID @vendor, search for a VSEC capability with
 * VSEC ID @cap. If found, return the capability offset in
 * config space; otherwise return 0.
 */
u16 pci_find_vsec_capability(struct pci_dev *dev, u16 vendor, int cap)
{
	u16 vsec = 0;
	u32 header;
	int ret;

	if (vendor != dev->vendor)
		return 0;

	while ((vsec = pci_find_next_ext_capability(dev, vsec,
						     PCI_EXT_CAP_ID_VNDR))) {
		ret = pci_read_config_dword(dev, vsec + PCI_VNDR_HEADER, &header);
		if (ret != PCIBIOS_SUCCESSFUL)
			continue;

		if (PCI_VNDR_HEADER_ID(header) == cap)
			return vsec;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(pci_find_vsec_capability);

/**
 * pci_find_dvsec_capability - Find DVSEC for vendor
 * @dev: PCI device to query
 * @vendor: Vendor ID to match for the DVSEC
 * @dvsec: Designated Vendor-specific capability ID
 *
 * If DVSEC has Vendor ID @vendor and DVSEC ID @dvsec return the capability
 * offset in config space; otherwise return 0.
 */
u16 pci_find_dvsec_capability(struct pci_dev *dev, u16 vendor, u16 dvsec)
{
	int pos;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DVSEC);
	if (!pos)
		return 0;

	while (pos) {
		u16 v, id;

		pci_read_config_word(dev, pos + PCI_DVSEC_HEADER1, &v);
		pci_read_config_word(dev, pos + PCI_DVSEC_HEADER2, &id);
		if (vendor == v && dvsec == id)
			return pos;

		pos = pci_find_next_ext_capability(dev, pos, PCI_EXT_CAP_ID_DVSEC);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(pci_find_dvsec_capability);

/**
 * pci_find_parent_resource - return resource region of parent bus of given
 *			      region
 * @dev: PCI device structure contains resources to be searched
 * @res: child resource record for which parent is sought
 *
 * For given resource region of given device, return the resource region of
 * parent bus the given region is contained in.
 */
struct resource *pci_find_parent_resource(const struct pci_dev *dev,
					  struct resource *res)
{
	const struct pci_bus *bus = dev->bus;
	struct resource *r;

	pci_bus_for_each_resource(bus, r) {
		if (!r)
			continue;
		if (resource_contains(r, res)) {

			/*
			 * If the window is prefetchable but the BAR is
			 * not, the allocator made a mistake.
			 */
			if (r->flags & IORESOURCE_PREFETCH &&
			    !(res->flags & IORESOURCE_PREFETCH))
				return NULL;

			/*
			 * If we're below a transparent bridge, there may
			 * be both a positively-decoded aperture and a
			 * subtractively-decoded region that contain the BAR.
			 * We want the positively-decoded one, so this depends
			 * on pci_bus_for_each_resource() giving us those
			 * first.
			 */
			return r;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(pci_find_parent_resource);

/**
 * pci_find_resource - Return matching PCI device resource
 * @dev: PCI device to query
 * @res: Resource to look for
 *
 * Goes over standard PCI resources (BARs) and checks if the given resource
 * is partially or fully contained in any of them. In that case the
 * matching resource is returned, %NULL otherwise.
 */
struct resource *pci_find_resource(struct pci_dev *dev, struct resource *res)
{
	int i;

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		struct resource *r = &dev->resource[i];

		if (r->start && resource_contains(r, res))
			return r;
	}

	return NULL;
}
EXPORT_SYMBOL(pci_find_resource);

/**
 * pci_resource_name - Return the name of the PCI resource
 * @dev: PCI device to query
 * @i: index of the resource
 *
 * Return the standard PCI resource (BAR) name according to its index.
 */
const char *pci_resource_name(struct pci_dev *dev, unsigned int i)
{
	static const char * const bar_name[] = {
		"BAR 0",
		"BAR 1",
		"BAR 2",
		"BAR 3",
		"BAR 4",
		"BAR 5",
		"ROM",
#ifdef CONFIG_PCI_IOV
		"VF BAR 0",
		"VF BAR 1",
		"VF BAR 2",
		"VF BAR 3",
		"VF BAR 4",
		"VF BAR 5",
#endif
		"bridge window",	/* "io" included in %pR */
		"bridge window",	/* "mem" included in %pR */
		"bridge window",	/* "mem pref" included in %pR */
	};
	static const char * const cardbus_name[] = {
		"BAR 1",
		"unknown",
		"unknown",
		"unknown",
		"unknown",
		"unknown",
#ifdef CONFIG_PCI_IOV
		"unknown",
		"unknown",
		"unknown",
		"unknown",
		"unknown",
		"unknown",
#endif
		"CardBus bridge window 0",	/* I/O */
		"CardBus bridge window 1",	/* I/O */
		"CardBus bridge window 0",	/* mem */
		"CardBus bridge window 1",	/* mem */
	};

	if (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS &&
	    i < ARRAY_SIZE(cardbus_name))
		return cardbus_name[i];

	if (i < ARRAY_SIZE(bar_name))
		return bar_name[i];

	return "unknown";
}

/**
 * pci_wait_for_pending - wait for @mask bit(s) to clear in status word @pos
 * @dev: the PCI device to operate on
 * @pos: config space offset of status word
 * @mask: mask of bit(s) to care about in status word
 *
 * Return 1 when mask bit(s) in status word clear, 0 otherwise.
 */
int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask)
{
	int i;

	/* Wait for Transaction Pending bit clear */
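	/* Backs off 100/200/400 ms between reads, so ~700 ms worst case */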
	for (i = 0; i < 4; i++) {
		u16 status;
		if (i)
			msleep((1 << (i - 1)) * 100);

		pci_read_config_word(dev, pos, &status);
		if (!(status & mask))
			return 1;
	}

	return 0;
}

static int pci_acs_enable;

/**
 * pci_request_acs - ask for ACS to be enabled if supported
 */
void pci_request_acs(void)
{
	pci_acs_enable = 1;
}

static const char *disable_acs_redir_param;
static const char *config_acs_param;

struct pci_acs {
	u16 cap;
	u16 ctrl;
	u16 fw_ctrl;
};

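/*
 * Illustrative note on the "<ACS flags>@<device spec>" strings parsed below:
 * each flag character is '0' (force-clear), '1' (force-set) or 'x'/'X' (keep
 * the firmware value); the rightmost character maps to PCI_ACS_SV (bit 0)
 * and each character to its left to the next-higher ACS control bit.
 */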
static void __pci_config_acs(struct pci_dev *dev, struct pci_acs *caps,
			     const char *p, const u16 acs_mask, const u16 acs_flags)
{
	u16 flags = acs_flags;
	u16 mask = acs_mask;
	char *delimit;
	int ret = 0;

	if (!p)
		return;

	while (*p) {
		if (!acs_mask) {
			/* Check for ACS flags */
			delimit = strstr(p, "@");
			if (delimit) {
				int end;
				u32 shift = 0;

				end = delimit - p - 1;
				mask = 0;
				flags = 0;

				while (end > -1) {
					if (*(p + end) == '0') {
						mask |= 1 << shift;
						shift++;
						end--;
					} else if (*(p + end) == '1') {
						mask |= 1 << shift;
						flags |= 1 << shift;
						shift++;
						end--;
					} else if ((*(p + end) == 'x') || (*(p + end) == 'X')) {
						shift++;
						end--;
					} else {
						pci_err(dev, "Invalid ACS flags... Ignoring\n");
						return;
					}
				}
				p = delimit + 1;
			} else {
				pci_err(dev, "ACS Flags missing\n");
				return;
			}
		}

		if (mask & ~(PCI_ACS_SV | PCI_ACS_TB | PCI_ACS_RR | PCI_ACS_CR |
			    PCI_ACS_UF | PCI_ACS_EC | PCI_ACS_DT)) {
			pci_err(dev, "Invalid ACS flags specified\n");
			return;
		}

		ret = pci_dev_str_match(dev, p, &p);
		if (ret < 0) {
			pr_info_once("PCI: Can't parse ACS command line parameter\n");
			break;
		} else if (ret == 1) {
			/* Found a match */
			break;
		}

		if (*p != ';' && *p != ',') {
			/* End of param or invalid format */
			break;
		}
		p++;
	}

	if (ret != 1)
		return;

	if (!pci_dev_specific_disable_acs_redir(dev))
		return;

	pci_dbg(dev, "ACS mask  = %#06x\n", mask);
	pci_dbg(dev, "ACS flags = %#06x\n", flags);
	pci_dbg(dev, "ACS control = %#06x\n", caps->ctrl);
	pci_dbg(dev, "ACS fw_ctrl = %#06x\n", caps->fw_ctrl);

	/*
	 * For mask bits that are 0, copy them from the firmware setting
	 * and apply flags for all the mask bits that are 1.
	 */
	caps->ctrl = (caps->fw_ctrl & ~mask) | (flags & mask);

	pci_info(dev, "Configured ACS to %#06x\n", caps->ctrl);
}

/**
 * pci_std_enable_acs - enable ACS on devices using standard ACS capabilities
 * @dev: the PCI device
 * @caps: default ACS controls
 */
static void pci_std_enable_acs(struct pci_dev *dev, struct pci_acs *caps)
{
	/* Source Validation */
	caps->ctrl |= (caps->cap & PCI_ACS_SV);

	/* P2P Request Redirect */
	caps->ctrl |= (caps->cap & PCI_ACS_RR);

	/* P2P Completion Redirect */
	caps->ctrl |= (caps->cap & PCI_ACS_CR);

	/* Upstream Forwarding */
	caps->ctrl |= (caps->cap & PCI_ACS_UF);

	/* Enable Translation Blocking for external devices and noats */
	if (pci_ats_disabled() || dev->external_facing || dev->untrusted)
		caps->ctrl |= (caps->cap & PCI_ACS_TB);
}

/**
 * pci_enable_acs - enable ACS if hardware supports it
 * @dev: the PCI device
 */
static void pci_enable_acs(struct pci_dev *dev)
{
	struct pci_acs caps;
	bool enable_acs = false;
	int pos;

	/* If an iommu is present we start with kernel default caps */
	if (pci_acs_enable) {
		if (pci_dev_specific_enable_acs(dev))
			enable_acs = true;
	}

	pos = dev->acs_cap;
	if (!pos)
		return;

	pci_read_config_word(dev, pos + PCI_ACS_CAP, &caps.cap);
	pci_read_config_word(dev, pos + PCI_ACS_CTRL, &caps.ctrl);
	caps.fw_ctrl = caps.ctrl;

	if (enable_acs)
		pci_std_enable_acs(dev, &caps);

	/*
	 * Always apply caps from the command line, even if there is no iommu.
	 * Trust that the admin has a reason to change the ACS settings.
	 */
	__pci_config_acs(dev, &caps, disable_acs_redir_param,
			 PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC,
			 ~(PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC));
	__pci_config_acs(dev, &caps, config_acs_param, 0, 0);

	pci_write_config_word(dev, pos + PCI_ACS_CTRL, caps.ctrl);
}

/**
 * pci_restore_bars - restore a device's BAR values (e.g. after wake-up)
 * @dev: PCI device to have its BARs restored
 *
 * Restore the BAR values for a given device, so as to make it
 * accessible by its driver.
 */
static void pci_restore_bars(struct pci_dev *dev)
{
	int i;

	for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
		pci_update_resource(dev, i);
}

static inline bool platform_pci_power_manageable(struct pci_dev *dev)
{
	if (pci_use_mid_pm())
		return true;

	return acpi_pci_power_manageable(dev);
}

static inline int platform_pci_set_power_state(struct pci_dev *dev,
					       pci_power_t t)
{
	if (pci_use_mid_pm())
		return mid_pci_set_power_state(dev, t);

	return acpi_pci_set_power_state(dev, t);
}

static inline pci_power_t platform_pci_get_power_state(struct pci_dev *dev)
{
	if (pci_use_mid_pm())
		return mid_pci_get_power_state(dev);

	return acpi_pci_get_power_state(dev);
}

static inline void platform_pci_refresh_power_state(struct pci_dev *dev)
{
	if (!pci_use_mid_pm())
		acpi_pci_refresh_power_state(dev);
}

static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev)
{
	if (pci_use_mid_pm())
		return PCI_POWER_ERROR;

	return acpi_pci_choose_state(dev);
}

static inline int platform_pci_set_wakeup(struct pci_dev *dev, bool enable)
{
	if (pci_use_mid_pm())
		return PCI_POWER_ERROR;

	return acpi_pci_wakeup(dev, enable);
}

static inline bool platform_pci_need_resume(struct pci_dev *dev)
{
	if (pci_use_mid_pm())
		return false;

	return acpi_pci_need_resume(dev);
}

static inline bool platform_pci_bridge_d3(struct pci_dev *dev)
{
	if (pci_use_mid_pm())
		return false;

	return acpi_pci_bridge_d3(dev);
}

/**
 * pci_update_current_state - Read power state of given device and cache it
 * @dev: PCI device to handle.
 * @state: State to cache in case the device doesn't have the PM capability
 *
 * The power state is read from the PMCSR register, which however is
 * inaccessible in D3cold.  The platform firmware is therefore queried first
 * to detect accessibility of the register.  In case the platform firmware
 * reports an incorrect state or the device isn't power manageable by the
 * platform at all, we try to detect D3cold by testing accessibility of the
 * vendor ID in config space.
 */
void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
{
	if (platform_pci_get_power_state(dev) == PCI_D3cold) {
		dev->current_state = PCI_D3cold;
	} else if (dev->pm_cap) {
		u16 pmcsr;

		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
		if (PCI_POSSIBLE_ERROR(pmcsr)) {
			dev->current_state = PCI_D3cold;
			return;
		}
		dev->current_state = pmcsr & PCI_PM_CTRL_STATE_MASK;
	} else {
		dev->current_state = state;
	}
}

/**
 * pci_refresh_power_state - Refresh the given device's power state data
 * @dev: Target PCI device.
 *
 * Ask the platform to refresh the device's power state information and invoke
 * pci_update_current_state() to update its current PCI power state.
 */
void pci_refresh_power_state(struct pci_dev *dev)
{
	platform_pci_refresh_power_state(dev);
	pci_update_current_state(dev, dev->current_state);
}

/**
 * pci_platform_power_transition - Use platform to change device power state
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 */
int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
{
	int error;

	error = platform_pci_set_power_state(dev, state);
	if (!error)
		pci_update_current_state(dev, state);
	else if (!dev->pm_cap) /* Fall back to PCI_D0 */
		dev->current_state = PCI_D0;

	return error;
}
EXPORT_SYMBOL_GPL(pci_platform_power_transition);

static int pci_resume_one(struct pci_dev *pci_dev, void *ign)
{
	pm_request_resume(&pci_dev->dev);
	return 0;
}

/**
 * pci_resume_bus - Walk given bus and runtime resume devices on it
 * @bus: Top bus of the subtree to walk.
 */
void pci_resume_bus(struct pci_bus *bus)
{
	if (bus)
		pci_walk_bus(bus, pci_resume_one, NULL);
}

static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout)
{
	int delay = 1;
	bool retrain = false;
	struct pci_dev *root, *bridge;

	root = pcie_find_root_port(dev);

	if (pci_is_pcie(dev)) {
		bridge = pci_upstream_bridge(dev);
		if (bridge)
			retrain = true;
	}

	/*
	 * The caller has already waited long enough after a reset that the
	 * device should respond to config requests, but it may respond
	 * with Request Retry Status (RRS) if it needs more time to
	 * initialize.
	 *
	 * If the device is below a Root Port with Configuration RRS
	 * Software Visibility enabled, reading the Vendor ID returns a
	 * special data value if the device responded with RRS.  Read the
	 * Vendor ID until we get non-RRS status.
	 *
	 * If there's no Root Port or Configuration RRS Software Visibility
	 * is not enabled, the device may still respond with RRS, but
	 * hardware may retry the config request.  If no retries receive
	 * Successful Completion, hardware generally synthesizes ~0
	 * (PCI_ERROR_RESPONSE) data to complete the read.  Reading Vendor
	 * ID for VFs and non-existent devices also returns ~0, so read the
	 * Command register until it returns something other than ~0.
	 */
	for (;;) {
		u32 id;

		if (pci_dev_is_disconnected(dev)) {
			pci_dbg(dev, "disconnected; not waiting\n");
			return -ENOTTY;
		}

		if (root && root->config_rrs_sv) {
			pci_read_config_dword(dev, PCI_VENDOR_ID, &id);
			if (!pci_bus_rrs_vendor_id(id))
				break;
		} else {
			pci_read_config_dword(dev, PCI_COMMAND, &id);
			if (!PCI_POSSIBLE_ERROR(id))
				break;
		}

		if (delay > timeout) {
			pci_warn(dev, "not ready %dms after %s; giving up\n",
				 delay - 1, reset_type);
			return -ENOTTY;
		}

		if (delay > PCI_RESET_WAIT) {
			if (retrain) {
				retrain = false;
				if (pcie_failed_link_retrain(bridge) == 0) {
					delay = 1;
					continue;
				}
			}
			pci_info(dev, "not ready %dms after %s; waiting\n",
				 delay - 1, reset_type);
		}

		msleep(delay);
		delay *= 2;
	}

	if (delay > PCI_RESET_WAIT)
		pci_info(dev, "ready %dms after %s\n", delay - 1,
			 reset_type);
	else
		pci_dbg(dev, "ready %dms after %s\n", delay - 1,
			reset_type);

	return 0;
}

/**
 * pci_power_up - Put the given device into D0
 * @dev: PCI device to power up
 *
 * On success, return 0 or 1, depending on whether or not it is necessary to
 * restore the device's BARs subsequently (1 is returned in that case).
 *
 * On failure, return a negative error code.  Always return failure if @dev
 * lacks a Power Management Capability, even if the platform was able to
 * put the device in D0 via non-PCI means.
 */
int pci_power_up(struct pci_dev *dev)
{
	bool need_restore;
	pci_power_t state;
	u16 pmcsr;

	platform_pci_set_power_state(dev, PCI_D0);

	if (!dev->pm_cap) {
		state = platform_pci_get_power_state(dev);
		if (state == PCI_UNKNOWN)
			dev->current_state = PCI_D0;
		else
			dev->current_state = state;

		return -EIO;
	}

	if (pci_dev_is_disconnected(dev)) {
		dev->current_state = PCI_D3cold;
		return -EIO;
	}

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	if (PCI_POSSIBLE_ERROR(pmcsr)) {
		pci_err(dev, "Unable to change power state from %s to D0, device inaccessible\n",
			pci_power_name(dev->current_state));
		dev->current_state = PCI_D3cold;
		return -EIO;
	}

	state = pmcsr & PCI_PM_CTRL_STATE_MASK;

	need_restore = (state == PCI_D3hot || dev->current_state >= PCI_D3hot) &&
			!(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET);

	if (state == PCI_D0)
		goto end;

	/*
	 * Force the entire word to 0. This doesn't affect PME_Status, disables
	 * PME_En, and sets PowerState to 0.
	 */
	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, 0);

	/* Mandatory transition delays; see PCI PM 1.2. */
	if (state == PCI_D3hot)
		pci_dev_d3_sleep(dev);
	else if (state == PCI_D2)
		udelay(PCI_PM_D2_DELAY);

end:
	dev->current_state = PCI_D0;
	if (need_restore)
		return 1;

	return 0;
}

/**
 * pci_set_full_power_state - Put a PCI device into D0 and update its state
 * @dev: PCI device to power up
 * @locked: whether pci_bus_sem is held
 *
 * Call pci_power_up() to put @dev into D0, read from its PCI_PM_CTRL register
 * to confirm the state change, restore its BARs if they might be lost and
 * reconfigure ASPM in accordance with the new power state.
 *
 * If pci_restore_state() is going to be called right after a power state change
 * to D0, it is more efficient to use pci_power_up() directly instead of this
 * function.
 */
static int pci_set_full_power_state(struct pci_dev *dev, bool locked)
{
	u16 pmcsr;
	int ret;

	ret = pci_power_up(dev);
	if (ret < 0) {
		if (dev->current_state == PCI_D0)
			return 0;

		return ret;
	}

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	dev->current_state = pmcsr & PCI_PM_CTRL_STATE_MASK;
	if (dev->current_state != PCI_D0) {
		pci_info_ratelimited(dev, "Refused to change power state from %s to D0\n",
				     pci_power_name(dev->current_state));
	} else if (ret > 0) {
		/*
		 * According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
		 * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
		 * from D3hot to D0 _may_ perform an internal reset, thereby
		 * going to "D0 Uninitialized" rather than "D0 Initialized".
		 * For example, at least some versions of the 3c905B and the
		 * 3c556B exhibit this behaviour.
		 *
		 * At least some laptop BIOSen (e.g. the Thinkpad T21) leave
		 * devices in a D3hot state at boot.  Consequently, we need to
		 * restore at least the BARs so that the device will be
		 * accessible to its driver.
		 */
		pci_restore_bars(dev);
	}

	if (dev->bus->self)
		pcie_aspm_pm_state_change(dev->bus->self, locked);

	return 0;
}

/**
 * __pci_dev_set_current_state - Set current state of a PCI device
 * @dev: Device to handle
 * @data: pointer to state to be set
 */
static int __pci_dev_set_current_state(struct pci_dev *dev, void *data)
{
	pci_power_t state = *(pci_power_t *)data;

	dev->current_state = state;
	return 0;
}

/**
 * pci_bus_set_current_state - Walk given bus and set current state of devices
 * @bus: Top bus of the subtree to walk.
 * @state: state to be set
 */
void pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state)
{
	if (bus)
		pci_walk_bus(bus, __pci_dev_set_current_state, &state);
}

static void __pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state, bool locked)
{
	if (!bus)
		return;

	if (locked)
		pci_walk_bus_locked(bus, __pci_dev_set_current_state, &state);
	else
		pci_walk_bus(bus, __pci_dev_set_current_state, &state);
}

/**
 * pci_set_low_power_state - Put a PCI device into a low-power state.
 * @dev: PCI device to handle.
 * @state: PCI power state (D1, D2, D3hot) to put the device into.
 * @locked: whether pci_bus_sem is held
 *
 * Use the device's PCI_PM_CTRL register to put it into a low-power state.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if device already is in the requested state.
 * 0 if device's power state has been successfully changed.
 */
static int pci_set_low_power_state(struct pci_dev *dev, pci_power_t state, bool locked)
{
	u16 pmcsr;

	if (!dev->pm_cap)
		return -EIO;

	/*
	 * Validate transition: We can enter D0 from any state, but if
	 * we're already in a low-power state, we can only go deeper.  E.g.,
	 * we can go from D1 to D3, but we can't go directly from D3 to D1;
	 * we'd have to go from D3 to D0, then to D1.
	 */
	if (dev->current_state <= PCI_D3cold && dev->current_state > state) {
		pci_dbg(dev, "Invalid power transition (from %s to %s)\n",
			pci_power_name(dev->current_state),
			pci_power_name(state));
		return -EINVAL;
	}

	/* Check if this device supports the desired state */
	if ((state == PCI_D1 && !dev->d1_support)
	   || (state == PCI_D2 && !dev->d2_support))
		return -EIO;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	if (PCI_POSSIBLE_ERROR(pmcsr)) {
		pci_err(dev, "Unable to change power state from %s to %s, device inaccessible\n",
			pci_power_name(dev->current_state),
			pci_power_name(state));
		dev->current_state = PCI_D3cold;
		return -EIO;
	}

	pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
	pmcsr |= state;

	/* Enter specified state */
	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);

	/* Mandatory power management transition delays; see PCI PM 1.2. */
	if (state == PCI_D3hot)
		pci_dev_d3_sleep(dev);
	else if (state == PCI_D2)
		udelay(PCI_PM_D2_DELAY);

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	dev->current_state = pmcsr & PCI_PM_CTRL_STATE_MASK;
	if (dev->current_state != state)
		pci_info_ratelimited(dev, "Refused to change power state from %s to %s\n",
				     pci_power_name(dev->current_state),
				     pci_power_name(state));

	if (dev->bus->self)
		pcie_aspm_pm_state_change(dev->bus->self, locked);

	return 0;
}

static int __pci_set_power_state(struct pci_dev *dev, pci_power_t state, bool locked)
{
	int error;

	/* Bound the state we're entering */
	if (state > PCI_D3cold)
		state = PCI_D3cold;
	else if (state < PCI_D0)
		state = PCI_D0;
	else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))

		/*
		 * If the device or the parent bridge do not support PCI
		 * PM, ignore the request if we're doing anything other
		 * than putting it into D0 (which would only happen on
		 * boot).
		 */
		return 0;

	/* Check if we're already there */
	if (dev->current_state == state)
		return 0;

	if (state == PCI_D0)
		return pci_set_full_power_state(dev, locked);

	/*
	 * This device is quirked not to be put into D3, so don't put it in
	 * D3
	 */
	if (state >= PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
		return 0;

	if (state == PCI_D3cold) {
		/*
		 * To put the device in D3cold, put it into D3hot in the native
		 * way, then put it into D3cold using platform ops.
		 */
		error = pci_set_low_power_state(dev, PCI_D3hot, locked);

		if (pci_platform_power_transition(dev, PCI_D3cold))
			return error;

		/* Powering off a bridge may power off the whole hierarchy */
		if (dev->current_state == PCI_D3cold)
			__pci_bus_set_current_state(dev->subordinate, PCI_D3cold, locked);
	} else {
		error = pci_set_low_power_state(dev, state, locked);

		if (pci_platform_power_transition(dev, state))
			return error;
	}

	return 0;
}

/**
 * pci_set_power_state - Set the power state of a PCI device
 * @dev: PCI device to handle.
 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 *
 * Transition a device to a new power state, using the platform firmware and/or
 * the device's PCI PM registers.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if the transition is to D1 or D2 but D1 and D2 are not supported.
 * 0 if device already is in the requested state.
 * 0 if the transition is to D3 but D3 is not supported.
 * 0 if device's power state has been successfully changed.
 */
int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	return __pci_set_power_state(dev, state, false);
}
EXPORT_SYMBOL(pci_set_power_state);

int pci_set_power_state_locked(struct pci_dev *dev, pci_power_t state)
{
	lockdep_assert_held(&pci_bus_sem);

	return __pci_set_power_state(dev, state, true);
}
EXPORT_SYMBOL(pci_set_power_state_locked);

#define PCI_EXP_SAVE_REGS	7
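/* DEVCTL, LNKCTL, SLTCTL, RTCTL, DEVCTL2, LNKCTL2 and SLTCTL2 */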

static struct pci_cap_saved_state *_pci_find_saved_cap(struct pci_dev *pci_dev,
						       u16 cap, bool extended)
{
	struct pci_cap_saved_state *tmp;

	hlist_for_each_entry(tmp, &pci_dev->saved_cap_space, next) {
		if (tmp->cap.cap_extended == extended && tmp->cap.cap_nr == cap)
			return tmp;
	}
	return NULL;
}

struct pci_cap_saved_state *pci_find_saved_cap(struct pci_dev *dev, char cap)
{
	return _pci_find_saved_cap(dev, cap, false);
}

struct pci_cap_saved_state *pci_find_saved_ext_cap(struct pci_dev *dev, u16 cap)
{
	return _pci_find_saved_cap(dev, cap, true);
}

static int pci_save_pcie_state(struct pci_dev *dev)
{
	int i = 0;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	if (!pci_is_pcie(dev))
		return 0;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	if (!save_state) {
		pci_err(dev, "buffer not found in %s\n", __func__);
		return -ENOMEM;
	}

	cap = (u16 *)&save_state->cap.data[0];
	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_LNKCTL, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_SLTCTL, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_RTCTL, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_SLTCTL2, &cap[i++]);

	pci_save_aspm_l1ss_state(dev);
	pci_save_ltr_state(dev);

	return 0;
}

static void pci_restore_pcie_state(struct pci_dev *dev)
{
	int i = 0;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	/*
	 * Restore max latencies (in the LTR capability) before enabling
	 * LTR itself in PCI_EXP_DEVCTL2.
	 */
	pci_restore_ltr_state(dev);
	pci_restore_aspm_l1ss_state(dev);

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	if (!save_state)
		return;

	/*
	 * Downstream ports reset the LTR enable bit when link goes down.
	 * Check and re-configure the bit here before restoring device.
	 * PCIe r5.0, sec 7.5.3.16.
	 */
	pci_bridge_reconfigure_ltr(dev);

	cap = (u16 *)&save_state->cap.data[0];
	pcie_capability_write_word(dev, PCI_EXP_DEVCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_LNKCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_SLTCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_RTCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_DEVCTL2, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_SLTCTL2, cap[i++]);
}

static int pci_save_pcix_state(struct pci_dev *dev)
{
	int pos;
	struct pci_cap_saved_state *save_state;

	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!pos)
		return 0;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
	if (!save_state) {
		pci_err(dev, "buffer not found in %s\n", __func__);
		return -ENOMEM;
	}

	pci_read_config_word(dev, pos + PCI_X_CMD,
			     (u16 *)save_state->cap.data);

	return 0;
}

static void pci_restore_pcix_state(struct pci_dev *dev)
{
	int i = 0, pos;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!save_state || !pos)
		return;
	cap = (u16 *)&save_state->cap.data[0];

	pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
}

/**
 * pci_save_state - save the PCI configuration space of a device before
 *		    suspending
 * @dev: PCI device that we're dealing with
 */
int pci_save_state(struct pci_dev *dev)
{
	int i;
	/* XXX: 100% dword access ok here? */
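	/* The 16 dwords below cover the 64-byte standard config header */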
1745 for (i = 0; i < 16; i++) {
1746 pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
1747 pci_dbg(dev, "save config %#04x: %#010x\n",
1748 i * 4, dev->saved_config_space[i]);
1749 }
1750 dev->state_saved = true;
1751
1752 i = pci_save_pcie_state(dev);
1753 if (i != 0)
1754 return i;
1755
1756 i = pci_save_pcix_state(dev);
1757 if (i != 0)
1758 return i;
1759
1760 pci_save_dpc_state(dev);
1761 pci_save_aer_state(dev);
1762 pci_save_ptm_state(dev);
1763 pci_save_tph_state(dev);
1764 return pci_save_vc_state(dev);
1765 }
1766 EXPORT_SYMBOL(pci_save_state);
1767
pci_restore_config_dword(struct pci_dev * pdev,int offset,u32 saved_val,int retry,bool force)1768 static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
1769 u32 saved_val, int retry, bool force)
1770 {
1771 u32 val;
1772
1773 pci_read_config_dword(pdev, offset, &val);
1774 if (!force && val == saved_val)
1775 return;
1776
1777 for (;;) {
1778 pci_dbg(pdev, "restore config %#04x: %#010x -> %#010x\n",
1779 offset, val, saved_val);
1780 pci_write_config_dword(pdev, offset, saved_val);
1781 if (retry-- <= 0)
1782 return;
1783
1784 pci_read_config_dword(pdev, offset, &val);
1785 if (val == saved_val)
1786 return;
1787
1788 mdelay(1);
1789 }
1790 }
1791
1792 static void pci_restore_config_space_range(struct pci_dev *pdev,
1793 int start, int end, int retry,
1794 bool force)
1795 {
1796 int index;
1797
1798 for (index = end; index >= start; index--)
1799 pci_restore_config_dword(pdev, 4 * index,
1800 pdev->saved_config_space[index],
1801 retry, force);
1802 }
1803
1804 static void pci_restore_config_space(struct pci_dev *pdev)
1805 {
1806 if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
1807 pci_restore_config_space_range(pdev, 10, 15, 0, false);
1808 /* Restore BARs before the command register. */
1809 pci_restore_config_space_range(pdev, 4, 9, 10, false);
1810 pci_restore_config_space_range(pdev, 0, 3, 0, false);
1811 } else if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
1812 pci_restore_config_space_range(pdev, 12, 15, 0, false);
1813
1814 /*
1815 * Force rewriting of prefetch registers to avoid S3 resume
1816 * issues on Intel PCI bridges that occur when these
1817 * registers are not explicitly written.
1818 */
1819 pci_restore_config_space_range(pdev, 9, 11, 0, true);
1820 pci_restore_config_space_range(pdev, 0, 8, 0, false);
1821 } else {
1822 pci_restore_config_space_range(pdev, 0, 15, 0, false);
1823 }
1824 }
1825
1826 static void pci_restore_rebar_state(struct pci_dev *pdev)
1827 {
1828 unsigned int pos, nbars, i;
1829 u32 ctrl;
1830
1831 pos = pdev->rebar_cap;
1832 if (!pos)
1833 return;
1834
1835 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
1836 nbars = FIELD_GET(PCI_REBAR_CTRL_NBAR_MASK, ctrl);
1837
1838 for (i = 0; i < nbars; i++, pos += 8) {
1839 struct resource *res;
1840 int bar_idx, size;
1841
1842 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
1843 bar_idx = ctrl & PCI_REBAR_CTRL_BAR_IDX;
1844 res = pci_resource_n(pdev, bar_idx);
1845 size = pci_rebar_bytes_to_size(resource_size(res));
1846 ctrl &= ~PCI_REBAR_CTRL_BAR_SIZE;
1847 ctrl |= FIELD_PREP(PCI_REBAR_CTRL_BAR_SIZE, size);
1848 pci_write_config_dword(pdev, pos + PCI_REBAR_CTRL, ctrl);
1849 }
1850 }
1851
1852 /**
1853 * pci_restore_state - Restore the saved state of a PCI device
1854 * @dev: PCI device that we're dealing with
1855 */
1856 void pci_restore_state(struct pci_dev *dev)
1857 {
1858 if (!dev->state_saved)
1859 return;
1860
1861 pci_restore_pcie_state(dev);
1862 pci_restore_pasid_state(dev);
1863 pci_restore_pri_state(dev);
1864 pci_restore_ats_state(dev);
1865 pci_restore_vc_state(dev);
1866 pci_restore_rebar_state(dev);
1867 pci_restore_dpc_state(dev);
1868 pci_restore_ptm_state(dev);
1869 pci_restore_tph_state(dev);
1870
1871 pci_aer_clear_status(dev);
1872 pci_restore_aer_state(dev);
1873
1874 pci_restore_config_space(dev);
1875
1876 pci_restore_pcix_state(dev);
1877 pci_restore_msi_state(dev);
1878
1879 /* Restore ACS and IOV configuration state */
1880 pci_enable_acs(dev);
1881 pci_restore_iov_state(dev);
1882
1883 dev->state_saved = false;
1884 }
1885 EXPORT_SYMBOL(pci_restore_state);
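
/*
 * Illustrative sketch (not part of this file): a driver using the legacy
 * PCI PM callbacks typically pairs pci_save_state() with
 * pci_restore_state() around a suspend/resume cycle. The foo_* names
 * below are hypothetical.
 *
 *	static int foo_suspend(struct pci_dev *pdev, pm_message_t state)
 *	{
 *		pci_save_state(pdev);
 *		pci_set_power_state(pdev, pci_choose_state(pdev, state));
 *		return 0;
 *	}
 *
 *	static int foo_resume(struct pci_dev *pdev)
 *	{
 *		pci_set_power_state(pdev, PCI_D0);
 *		pci_restore_state(pdev);
 *		return pci_reenable_device(pdev);
 *	}
 */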
1886
1887 struct pci_saved_state {
1888 u32 config_space[16];
1889 struct pci_cap_saved_data cap[];
1890 };
1891
1892 /**
1893 * pci_store_saved_state - Allocate and return an opaque struct containing
1894 * the device saved state.
1895 * @dev: PCI device that we're dealing with
1896 *
1897 * Return NULL if no state was saved or on error.
1898 */
1899 struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev)
1900 {
1901 struct pci_saved_state *state;
1902 struct pci_cap_saved_state *tmp;
1903 struct pci_cap_saved_data *cap;
1904 size_t size;
1905
1906 if (!dev->state_saved)
1907 return NULL;
1908
1909 size = sizeof(*state) + sizeof(struct pci_cap_saved_data);
1910
1911 hlist_for_each_entry(tmp, &dev->saved_cap_space, next)
1912 size += sizeof(struct pci_cap_saved_data) + tmp->cap.size;
1913
1914 state = kzalloc(size, GFP_KERNEL);
1915 if (!state)
1916 return NULL;
1917
1918 memcpy(state->config_space, dev->saved_config_space,
1919 sizeof(state->config_space));
1920
1921 cap = state->cap;
1922 hlist_for_each_entry(tmp, &dev->saved_cap_space, next) {
1923 size_t len = sizeof(struct pci_cap_saved_data) + tmp->cap.size;
1924 memcpy(cap, &tmp->cap, len);
1925 cap = (struct pci_cap_saved_data *)((u8 *)cap + len);
1926 }
1927 /* Empty cap_save terminates list */
1928
1929 return state;
1930 }
1931 EXPORT_SYMBOL_GPL(pci_store_saved_state);
1932
1933 /**
1934 * pci_load_saved_state - Reload the provided save state into struct pci_dev.
1935 * @dev: PCI device that we're dealing with
1936 * @state: Saved state returned from pci_store_saved_state()
1937 */
1938 int pci_load_saved_state(struct pci_dev *dev,
1939 struct pci_saved_state *state)
1940 {
1941 struct pci_cap_saved_data *cap;
1942
1943 dev->state_saved = false;
1944
1945 if (!state)
1946 return 0;
1947
1948 memcpy(dev->saved_config_space, state->config_space,
1949 sizeof(state->config_space));
1950
1951 cap = state->cap;
1952 while (cap->size) {
1953 struct pci_cap_saved_state *tmp;
1954
1955 tmp = _pci_find_saved_cap(dev, cap->cap_nr, cap->cap_extended);
1956 if (!tmp || tmp->cap.size != cap->size)
1957 return -EINVAL;
1958
1959 memcpy(tmp->cap.data, cap->data, tmp->cap.size);
1960 cap = (struct pci_cap_saved_data *)((u8 *)cap +
1961 sizeof(struct pci_cap_saved_data) + cap->size);
1962 }
1963
1964 dev->state_saved = true;
1965 return 0;
1966 }
1967 EXPORT_SYMBOL_GPL(pci_load_saved_state);
1968
1969 /**
1970 * pci_load_and_free_saved_state - Reload the save state pointed to by state,
1971 * and free the memory allocated for it.
1972 * @dev: PCI device that we're dealing with
1973 * @state: Pointer to saved state returned from pci_store_saved_state()
1974 */
1975 int pci_load_and_free_saved_state(struct pci_dev *dev,
1976 struct pci_saved_state **state)
1977 {
1978 int ret = pci_load_saved_state(dev, *state);
1979 kfree(*state);
1980 *state = NULL;
1981 return ret;
1982 }
1983 EXPORT_SYMBOL_GPL(pci_load_and_free_saved_state);
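
/*
 * Illustrative sketch: pci_store_saved_state() and
 * pci_load_and_free_saved_state() let a caller (a device-assignment
 * driver, for example) capture the saved state once and reapply it
 * later, after something else has overwritten the device's saved state:
 *
 *	struct pci_saved_state *saved;
 *
 *	pci_save_state(pdev);
 *	saved = pci_store_saved_state(pdev);
 *	...
 *	pci_load_and_free_saved_state(pdev, &saved);
 *	pci_restore_state(pdev);
 */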
1984
1985 int __weak pcibios_enable_device(struct pci_dev *dev, int bars)
1986 {
1987 return pci_enable_resources(dev, bars);
1988 }
1989
1990 static int pci_host_bridge_enable_device(struct pci_dev *dev)
1991 {
1992 struct pci_host_bridge *host_bridge = pci_find_host_bridge(dev->bus);
1993 int err;
1994
1995 if (host_bridge && host_bridge->enable_device) {
1996 err = host_bridge->enable_device(host_bridge, dev);
1997 if (err)
1998 return err;
1999 }
2000
2001 return 0;
2002 }
2003
2004 static void pci_host_bridge_disable_device(struct pci_dev *dev)
2005 {
2006 struct pci_host_bridge *host_bridge = pci_find_host_bridge(dev->bus);
2007
2008 if (host_bridge && host_bridge->disable_device)
2009 host_bridge->disable_device(host_bridge, dev);
2010 }
2011
2012 static int do_pci_enable_device(struct pci_dev *dev, int bars)
2013 {
2014 int err;
2015 struct pci_dev *bridge;
2016 u16 cmd;
2017 u8 pin;
2018
2019 err = pci_set_power_state(dev, PCI_D0);
2020 if (err < 0 && err != -EIO)
2021 return err;
2022
2023 bridge = pci_upstream_bridge(dev);
2024 if (bridge)
2025 pcie_aspm_powersave_config_link(bridge);
2026
2027 err = pci_host_bridge_enable_device(dev);
2028 if (err)
2029 return err;
2030
2031 err = pcibios_enable_device(dev, bars);
2032 if (err < 0)
2033 goto err_enable;
2034 pci_fixup_device(pci_fixup_enable, dev);
2035
2036 if (dev->msi_enabled || dev->msix_enabled)
2037 return 0;
2038
2039 pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
2040 if (pin) {
2041 pci_read_config_word(dev, PCI_COMMAND, &cmd);
2042 if (cmd & PCI_COMMAND_INTX_DISABLE)
2043 pci_write_config_word(dev, PCI_COMMAND,
2044 cmd & ~PCI_COMMAND_INTX_DISABLE);
2045 }
2046
2047 return 0;
2048
2049 err_enable:
2050 pci_host_bridge_disable_device(dev);
2051
2052 return err;
2053
2054 }
2055
2056 /**
2057 * pci_reenable_device - Resume abandoned device
2058 * @dev: PCI device to be resumed
2059 *
2060 * NOTE: This function is a backend of pci_default_resume() and is not supposed
2061 * to be called by normal code; write a proper resume handler and use that instead.
2062 */
2063 int pci_reenable_device(struct pci_dev *dev)
2064 {
2065 if (pci_is_enabled(dev))
2066 return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1);
2067 return 0;
2068 }
2069 EXPORT_SYMBOL(pci_reenable_device);
2070
2071 static void pci_enable_bridge(struct pci_dev *dev)
2072 {
2073 struct pci_dev *bridge;
2074 int retval;
2075
2076 bridge = pci_upstream_bridge(dev);
2077 if (bridge)
2078 pci_enable_bridge(bridge);
2079
2080 if (pci_is_enabled(dev)) {
2081 if (!dev->is_busmaster)
2082 pci_set_master(dev);
2083 return;
2084 }
2085
2086 retval = pci_enable_device(dev);
2087 if (retval)
2088 pci_err(dev, "Error enabling bridge (%d), continuing\n",
2089 retval);
2090 pci_set_master(dev);
2091 }
2092
2093 static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags)
2094 {
2095 struct pci_dev *bridge;
2096 int err;
2097 int i, bars = 0;
2098
2099 /*
2100 * Power state could be unknown at this point, either due to a fresh
2101 * boot or a device removal call. So get the current power state
2102 * so that things like MSI message writing will behave as expected
2103 * (e.g. if the device really is in D0 at enable time).
2104 */
2105 pci_update_current_state(dev, dev->current_state);
2106
2107 if (atomic_inc_return(&dev->enable_cnt) > 1)
2108 return 0; /* already enabled */
2109
2110 bridge = pci_upstream_bridge(dev);
2111 if (bridge)
2112 pci_enable_bridge(bridge);
2113
2114 /* Check all resources for a flags match; only the SR-IOV ones are skipped */
2115 for (i = 0; i <= PCI_ROM_RESOURCE; i++)
2116 if (dev->resource[i].flags & flags)
2117 bars |= (1 << i);
2118 for (i = PCI_BRIDGE_RESOURCES; i < DEVICE_COUNT_RESOURCE; i++)
2119 if (dev->resource[i].flags & flags)
2120 bars |= (1 << i);
2121
2122 err = do_pci_enable_device(dev, bars);
2123 if (err < 0)
2124 atomic_dec(&dev->enable_cnt);
2125 return err;
2126 }
2127
2128 /**
2129 * pci_enable_device_mem - Initialize a device for use with Memory space
2130 * @dev: PCI device to be initialized
2131 *
2132 * Initialize device before it's used by a driver. Ask low-level code
2133 * to enable Memory resources. Wake up the device if it was suspended.
2134 * Beware, this function can fail.
2135 */
2136 int pci_enable_device_mem(struct pci_dev *dev)
2137 {
2138 return pci_enable_device_flags(dev, IORESOURCE_MEM);
2139 }
2140 EXPORT_SYMBOL(pci_enable_device_mem);
2141
2142 /**
2143 * pci_enable_device - Initialize device before it's used by a driver.
2144 * @dev: PCI device to be initialized
2145 *
2146 * Initialize device before it's used by a driver. Ask low-level code
2147 * to enable I/O and memory. Wake up the device if it was suspended.
2148 * Beware, this function can fail.
2149 *
2150 * Note we don't actually enable the device multiple times when this
2151 * function is called repeatedly (we just increment the enable count).
2152 */
2153 int pci_enable_device(struct pci_dev *dev)
2154 {
2155 return pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO);
2156 }
2157 EXPORT_SYMBOL(pci_enable_device);
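
/*
 * Illustrative sketch: a typical probe() enables the device before
 * touching any of its BARs. The foo_* names are hypothetical.
 *
 *	static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 *	{
 *		int rc;
 *
 *		rc = pci_enable_device(pdev);
 *		if (rc)
 *			return rc;
 *
 *		pci_set_master(pdev);
 *		return 0;
 *	}
 */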
2158
2159 /*
2160 * pcibios_device_add - provide arch specific hooks when adding device dev
2161 * @dev: the PCI device being added
2162 *
2163 * Permits the platform to provide architecture specific functionality when
2164 * devices are added. This is the default implementation. Architecture
2165 * implementations can override this.
2166 */
2167 int __weak pcibios_device_add(struct pci_dev *dev)
2168 {
2169 return 0;
2170 }
2171
2172 /**
2173 * pcibios_release_device - provide arch specific hooks when releasing
2174 * device dev
2175 * @dev: the PCI device being released
2176 *
2177 * Permits the platform to provide architecture specific functionality when
2178 * devices are released. This is the default implementation. Architecture
2179 * implementations can override this.
2180 */
2181 void __weak pcibios_release_device(struct pci_dev *dev) {}
2182
2183 /**
2184 * pcibios_disable_device - disable arch specific PCI resources for device dev
2185 * @dev: the PCI device to disable
2186 *
2187 * Disables architecture specific PCI resources for the device. This
2188 * is the default implementation. Architecture implementations can
2189 * override this.
2190 */
2191 void __weak pcibios_disable_device(struct pci_dev *dev) {}
2192
2193 static void do_pci_disable_device(struct pci_dev *dev)
2194 {
2195 u16 pci_command;
2196
2197 pci_read_config_word(dev, PCI_COMMAND, &pci_command);
2198 if (pci_command & PCI_COMMAND_MASTER) {
2199 pci_command &= ~PCI_COMMAND_MASTER;
2200 pci_write_config_word(dev, PCI_COMMAND, pci_command);
2201 }
2202
2203 pcibios_disable_device(dev);
2204 }
2205
2206 /**
2207 * pci_disable_enabled_device - Disable device without updating enable_cnt
2208 * @dev: PCI device to disable
2209 *
2210 * NOTE: This function is a backend of PCI power management routines and is
2211 * not supposed to be called by drivers.
2212 */
2213 void pci_disable_enabled_device(struct pci_dev *dev)
2214 {
2215 if (pci_is_enabled(dev))
2216 do_pci_disable_device(dev);
2217 }
2218
2219 /**
2220 * pci_disable_device - Disable PCI device after use
2221 * @dev: PCI device to be disabled
2222 *
2223 * Signal to the system that the PCI device is not in use by the system
2224 * anymore. This only involves disabling PCI bus-mastering, if active.
2225 *
2226 * Note we don't actually disable the device until all callers of
2227 * pci_enable_device() have called pci_disable_device().
2228 */
2229 void pci_disable_device(struct pci_dev *dev)
2230 {
2231 dev_WARN_ONCE(&dev->dev, atomic_read(&dev->enable_cnt) <= 0,
2232 "disabling already-disabled device");
2233
2234 if (atomic_dec_return(&dev->enable_cnt) != 0)
2235 return;
2236
2237 pci_host_bridge_disable_device(dev);
2238
2239 do_pci_disable_device(dev);
2240
2241 dev->is_busmaster = 0;
2242 }
2243 EXPORT_SYMBOL(pci_disable_device);
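
/*
 * Illustrative sketch: because enabling is reference counted, a remove()
 * path balances each successful pci_enable_device() with exactly one
 * pci_disable_device() (foo_remove() is hypothetical):
 *
 *	static void foo_remove(struct pci_dev *pdev)
 *	{
 *		pci_clear_master(pdev);
 *		pci_disable_device(pdev);
 *	}
 */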
2244
2245 /**
2246 * pcibios_set_pcie_reset_state - set reset state for device dev
2247 * @dev: the PCIe device to reset
2248 * @state: Reset state to enter into
2249 *
2250 * Set the PCIe reset state for the device. This is the default
2251 * implementation. Architecture implementations can override this.
2252 */
2253 int __weak pcibios_set_pcie_reset_state(struct pci_dev *dev,
2254 enum pcie_reset_state state)
2255 {
2256 return -EINVAL;
2257 }
2258
2259 /**
2260 * pci_set_pcie_reset_state - set reset state for device dev
2261 * @dev: the PCIe device to reset
2262 * @state: Reset state to enter into
2263 *
2264 * Sets the PCI reset state for the device.
2265 */
2266 int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
2267 {
2268 return pcibios_set_pcie_reset_state(dev, state);
2269 }
2270 EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);
2271
2272 #ifdef CONFIG_PCIEAER
2273 void pcie_clear_device_status(struct pci_dev *dev)
2274 {
2275 u16 sta;
2276
2277 pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &sta);
2278 pcie_capability_write_word(dev, PCI_EXP_DEVSTA, sta);
2279 }
2280 #endif
2281
2282 /**
2283 * pcie_clear_root_pme_status - Clear root port PME interrupt status.
2284 * @dev: PCIe root port or event collector.
2285 */
2286 void pcie_clear_root_pme_status(struct pci_dev *dev)
2287 {
2288 pcie_capability_set_dword(dev, PCI_EXP_RTSTA, PCI_EXP_RTSTA_PME);
2289 }
2290
2291 /**
2292 * pci_check_pme_status - Check if given device has generated PME.
2293 * @dev: Device to check.
2294 *
2295 * Check the PME status of the device and if set, clear it and clear PME enable
2296 * (if set). Return 'true' if PME status and PME enable were both set or
2297 * 'false' otherwise.
2298 */
2299 bool pci_check_pme_status(struct pci_dev *dev)
2300 {
2301 int pmcsr_pos;
2302 u16 pmcsr;
2303 bool ret = false;
2304
2305 if (!dev->pm_cap)
2306 return false;
2307
2308 pmcsr_pos = dev->pm_cap + PCI_PM_CTRL;
2309 pci_read_config_word(dev, pmcsr_pos, &pmcsr);
2310 if (!(pmcsr & PCI_PM_CTRL_PME_STATUS))
2311 return false;
2312
2313 /* Clear PME status. */
2314 pmcsr |= PCI_PM_CTRL_PME_STATUS;
2315 if (pmcsr & PCI_PM_CTRL_PME_ENABLE) {
2316 /* Disable PME to avoid interrupt flood. */
2317 pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
2318 ret = true;
2319 }
2320
2321 pci_write_config_word(dev, pmcsr_pos, pmcsr);
2322
2323 return ret;
2324 }
2325
2326 /**
2327 * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set.
2328 * @dev: Device to handle.
2329 * @pme_poll_reset: Whether or not to reset the device's pme_poll flag.
2330 *
2331 * Check if @dev has generated PME and queue a resume request for it in that
2332 * case.
2333 */
2334 static int pci_pme_wakeup(struct pci_dev *dev, void *pme_poll_reset)
2335 {
2336 if (pme_poll_reset && dev->pme_poll)
2337 dev->pme_poll = false;
2338
2339 if (pci_check_pme_status(dev)) {
2340 pci_wakeup_event(dev);
2341 pm_request_resume(&dev->dev);
2342 }
2343 return 0;
2344 }
2345
2346 /**
2347 * pci_pme_wakeup_bus - Walk given bus and wake up devices on it, if necessary.
2348 * @bus: Top bus of the subtree to walk.
2349 */
2350 void pci_pme_wakeup_bus(struct pci_bus *bus)
2351 {
2352 if (bus)
2353 pci_walk_bus(bus, pci_pme_wakeup, (void *)true);
2354 }
2355
2356
2357 /**
2358 * pci_pme_capable - check the capability of PCI device to generate PME#
2359 * @dev: PCI device to handle.
2360 * @state: PCI state from which device will issue PME#.
2361 */
2362 bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
2363 {
2364 if (!dev->pm_cap)
2365 return false;
2366
2367 return !!(dev->pme_support & (1 << state));
2368 }
2369 EXPORT_SYMBOL(pci_pme_capable);
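
/*
 * Illustrative sketch: dev->pme_support is a bitmask indexed by power
 * state, so a caller checks a specific state like this:
 *
 *	if (pci_pme_capable(pdev, PCI_D3hot))
 *		... pdev can signal PME from D3hot ...
 */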
2370
2371 static void pci_pme_list_scan(struct work_struct *work)
2372 {
2373 struct pci_pme_device *pme_dev, *n;
2374
2375 mutex_lock(&pci_pme_list_mutex);
2376 list_for_each_entry_safe(pme_dev, n, &pci_pme_list, list) {
2377 struct pci_dev *pdev = pme_dev->dev;
2378
2379 if (pdev->pme_poll) {
2380 struct pci_dev *bridge = pdev->bus->self;
2381 struct device *dev = &pdev->dev;
2382 struct device *bdev = bridge ? &bridge->dev : NULL;
2383 int bref = 0;
2384
2385 /*
2386 * If we have a bridge, it should be in an active/D0
2387 * state or the configuration space of subordinate
2388 * devices may not be accessible or stable over the
2389 * course of the call.
2390 */
2391 if (bdev) {
2392 bref = pm_runtime_get_if_active(bdev);
2393 if (!bref)
2394 continue;
2395
2396 if (bridge->current_state != PCI_D0)
2397 goto put_bridge;
2398 }
2399
2400 /*
2401 * The device itself should be suspended but config
2402 * space must be accessible, therefore it cannot be in
2403 * D3cold.
2404 */
2405 if (pm_runtime_suspended(dev) &&
2406 pdev->current_state != PCI_D3cold)
2407 pci_pme_wakeup(pdev, NULL);
2408
2409 put_bridge:
2410 if (bref > 0)
2411 pm_runtime_put(bdev);
2412 } else {
2413 list_del(&pme_dev->list);
2414 kfree(pme_dev);
2415 }
2416 }
2417 if (!list_empty(&pci_pme_list))
2418 queue_delayed_work(system_freezable_wq, &pci_pme_work,
2419 msecs_to_jiffies(PME_TIMEOUT));
2420 mutex_unlock(&pci_pme_list_mutex);
2421 }
2422
2423 static void __pci_pme_active(struct pci_dev *dev, bool enable)
2424 {
2425 u16 pmcsr;
2426
2427 if (!dev->pme_support)
2428 return;
2429
2430 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
2431 /* Clear PME_Status by writing 1 to it and enable PME# */
2432 pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;
2433 if (!enable)
2434 pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
2435
2436 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
2437 }
2438
2439 /**
2440 * pci_pme_restore - Restore PME configuration after config space restore.
2441 * @dev: PCI device to update.
2442 */
2443 void pci_pme_restore(struct pci_dev *dev)
2444 {
2445 u16 pmcsr;
2446
2447 if (!dev->pme_support)
2448 return;
2449
2450 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
2451 if (dev->wakeup_prepared) {
2452 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
2453 pmcsr &= ~PCI_PM_CTRL_PME_STATUS;
2454 } else {
2455 pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
2456 pmcsr |= PCI_PM_CTRL_PME_STATUS;
2457 }
2458 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
2459 }
2460
2461 /**
2462 * pci_pme_active - enable or disable PCI device's PME# function
2463 * @dev: PCI device to handle.
2464 * @enable: 'true' to enable PME# generation; 'false' to disable it.
2465 *
2466 * The caller must verify that the device is capable of generating PME# before
2467 * calling this function with @enable equal to 'true'.
2468 */
2469 void pci_pme_active(struct pci_dev *dev, bool enable)
2470 {
2471 __pci_pme_active(dev, enable);
2472
2473 /*
2474 * PCI (as opposed to PCIe) PME requires that the device have
2475 * its PME# line hooked up correctly. Not all hardware vendors
2476 * do this, so the PME never gets delivered and the device
2477 * remains asleep. The easiest way around this is to
2478 * periodically walk the list of suspended devices and check
2479 * whether any have their PME flag set. The assumption is that
2480 * we'll wake up often enough anyway that this won't be a huge
2481 * hit, and the power savings from the devices will still be a
2482 * win.
2483 *
2484 * Although PCIe uses an in-band PME message instead of a PME# line
2485 * to report PME, PME does not work for some PCIe devices in
2486 * reality. For example, there are devices that set their PME
2487 * status bits, but don't really bother to send a PME message;
2488 * there are PCI Express Root Ports that don't bother to
2489 * trigger interrupts when they receive PME messages from the
2490 * devices below. So PME poll is used for PCIe devices too.
2491 */
2492
2493 if (dev->pme_poll) {
2494 struct pci_pme_device *pme_dev;
2495 if (enable) {
2496 pme_dev = kmalloc(sizeof(struct pci_pme_device),
2497 GFP_KERNEL);
2498 if (!pme_dev) {
2499 pci_warn(dev, "can't enable PME#\n");
2500 return;
2501 }
2502 pme_dev->dev = dev;
2503 mutex_lock(&pci_pme_list_mutex);
2504 list_add(&pme_dev->list, &pci_pme_list);
2505 if (list_is_singular(&pci_pme_list))
2506 queue_delayed_work(system_freezable_wq,
2507 &pci_pme_work,
2508 msecs_to_jiffies(PME_TIMEOUT));
2509 mutex_unlock(&pci_pme_list_mutex);
2510 } else {
2511 mutex_lock(&pci_pme_list_mutex);
2512 list_for_each_entry(pme_dev, &pci_pme_list, list) {
2513 if (pme_dev->dev == dev) {
2514 list_del(&pme_dev->list);
2515 kfree(pme_dev);
2516 break;
2517 }
2518 }
2519 mutex_unlock(&pci_pme_list_mutex);
2520 }
2521 }
2522
2523 pci_dbg(dev, "PME# %s\n", enable ? "enabled" : "disabled");
2524 }
2525 EXPORT_SYMBOL(pci_pme_active);
2526
2527 /**
2528 * __pci_enable_wake - enable PCI device as wakeup event source
2529 * @dev: PCI device affected
2530 * @state: PCI state from which device will issue wakeup events
2531 * @enable: True to enable event generation; false to disable
2532 *
2533 * This enables the device as a wakeup event source, or disables it.
2534 * When such events involve platform-specific hooks, those hooks are
2535 * called automatically by this routine.
2536 *
2537 * Devices with legacy power management (no standard PCI PM capabilities)
2538 * always require such platform hooks.
2539 *
2540 * RETURN VALUE:
2541 * 0 is returned on success
2542 * -EINVAL is returned if device is not supposed to wake up the system
2543 * Error code depending on the platform is returned if both the platform and
2544 * the native mechanism fail to enable the generation of wake-up events
2545 */
2546 static int __pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable)
2547 {
2548 int ret = 0;
2549
2550 /*
2551 * Bridges that are not directly power-manageable only signal
2552 * wakeup on behalf of subordinate devices, which is set up
2553 * elsewhere, so skip them. However, bridges that are
2554 * power-manageable may signal wakeup for themselves (for example,
2555 * on a hotplug event) and they need to be covered here.
2556 */
2557 if (!pci_power_manageable(dev))
2558 return 0;
2559
2560 /* Don't do the same thing twice in a row for one device. */
2561 if (!!enable == !!dev->wakeup_prepared)
2562 return 0;
2563
2564 /*
2565 * According to "PCI System Architecture" 4th ed. by Tom Shanley & Don
2566 * Anderson we should be doing PME# wake enable followed by ACPI wake
2567 * enable. To disable wake-up we call the platform first, for symmetry.
2568 */
2569
2570 if (enable) {
2571 int error;
2572
2573 /*
2574 * Enable PME signaling if the device can signal PME from
2575 * D3cold regardless of whether or not it can signal PME from
2576 * the current target state, because that will allow it to
2577 * signal PME when the hierarchy above it goes into D3cold and
2578 * the device itself ends up in D3cold as a result of that.
2579 */
2580 if (pci_pme_capable(dev, state) || pci_pme_capable(dev, PCI_D3cold))
2581 pci_pme_active(dev, true);
2582 else
2583 ret = 1;
2584 error = platform_pci_set_wakeup(dev, true);
2585 if (ret)
2586 ret = error;
2587 if (!ret)
2588 dev->wakeup_prepared = true;
2589 } else {
2590 platform_pci_set_wakeup(dev, false);
2591 pci_pme_active(dev, false);
2592 dev->wakeup_prepared = false;
2593 }
2594
2595 return ret;
2596 }
2597
2598 /**
2599 * pci_enable_wake - change wakeup settings for a PCI device
2600 * @pci_dev: Target device
2601 * @state: PCI state from which device will issue wakeup events
2602 * @enable: Whether or not to enable event generation
2603 *
2604 * If @enable is set, check device_may_wakeup() for the device before calling
2605 * __pci_enable_wake() for it.
2606 */
2607 int pci_enable_wake(struct pci_dev *pci_dev, pci_power_t state, bool enable)
2608 {
2609 if (enable && !device_may_wakeup(&pci_dev->dev))
2610 return -EINVAL;
2611
2612 return __pci_enable_wake(pci_dev, state, enable);
2613 }
2614 EXPORT_SYMBOL(pci_enable_wake);
2615
2616 /**
2617 * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold
2618 * @dev: PCI device to prepare
2619 * @enable: True to enable wake-up event generation; false to disable
2620 *
2621 * Many drivers want the device to wake up the system from D3_hot or D3_cold
2622 * and this function allows them to set that up cleanly - pci_enable_wake()
2623 * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI
2624 * ordering constraints.
2625 *
2626 * This function only returns error code if the device is not allowed to wake
2627 * up the system from sleep or it is not capable of generating PME# from both
2628 * D3_hot and D3_cold and the platform is unable to enable wake-up power for it.
2629 */
2630 int pci_wake_from_d3(struct pci_dev *dev, bool enable)
2631 {
2632 return pci_pme_capable(dev, PCI_D3cold) ?
2633 pci_enable_wake(dev, PCI_D3cold, enable) :
2634 pci_enable_wake(dev, PCI_D3hot, enable);
2635 }
2636 EXPORT_SYMBOL(pci_wake_from_d3);
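
/*
 * Illustrative sketch: a NIC driver with Wake-on-LAN support might call
 * this from its suspend path. foo_wol_enabled() is a hypothetical helper
 * returning whether the user requested WoL.
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct pci_dev *pdev = to_pci_dev(dev);
 *
 *		pci_wake_from_d3(pdev, foo_wol_enabled(pdev));
 *		return 0;
 *	}
 */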
2637
2638 /**
2639 * pci_target_state - find an appropriate low power state for a given PCI dev
2640 * @dev: PCI device
2641 * @wakeup: Whether or not wakeup functionality will be enabled for the device.
2642 *
2643 * Use underlying platform code to find a supported low power state for @dev.
2644 * If the platform can't manage @dev, return the deepest state from which it
2645 * can generate wake events, based on any available PME info.
2646 */
2647 static pci_power_t pci_target_state(struct pci_dev *dev, bool wakeup)
2648 {
2649 if (platform_pci_power_manageable(dev)) {
2650 /*
2651 * Call the platform to find the target state for the device.
2652 */
2653 pci_power_t state = platform_pci_choose_state(dev);
2654
2655 switch (state) {
2656 case PCI_POWER_ERROR:
2657 case PCI_UNKNOWN:
2658 return PCI_D3hot;
2659
2660 case PCI_D1:
2661 case PCI_D2:
2662 if (pci_no_d1d2(dev))
2663 return PCI_D3hot;
2664 }
2665
2666 return state;
2667 }
2668
2669 /*
2670 * If the device is in D3cold even though it's not power-manageable by
2671 * the platform, it may have been powered down by non-standard means.
2672 * Best to let it slumber.
2673 */
2674 if (dev->current_state == PCI_D3cold)
2675 return PCI_D3cold;
2676 else if (!dev->pm_cap)
2677 return PCI_D0;
2678
2679 if (wakeup && dev->pme_support) {
2680 pci_power_t state = PCI_D3hot;
2681
2682 /*
2683 * Find the deepest state from which the device can generate
2684 * PME#.
2685 */
2686 while (state && !(dev->pme_support & (1 << state)))
2687 state--;
2688
2689 if (state)
2690 return state;
2691 else if (dev->pme_support & 1)
2692 return PCI_D0;
2693 }
2694
2695 return PCI_D3hot;
2696 }
2697
2698 /**
2699 * pci_prepare_to_sleep - prepare PCI device for system-wide transition
2700 * into a sleep state
2701 * @dev: Device to handle.
2702 *
2703 * Choose the power state appropriate for the device depending on whether
2704 * it can wake up the system and/or is power manageable by the platform
2705 * (PCI_D3hot is the default) and put the device into that state.
2706 */
2707 int pci_prepare_to_sleep(struct pci_dev *dev)
2708 {
2709 bool wakeup = device_may_wakeup(&dev->dev);
2710 pci_power_t target_state = pci_target_state(dev, wakeup);
2711 int error;
2712
2713 if (target_state == PCI_POWER_ERROR)
2714 return -EIO;
2715
2716 pci_enable_wake(dev, target_state, wakeup);
2717
2718 error = pci_set_power_state(dev, target_state);
2719
2720 if (error)
2721 pci_enable_wake(dev, target_state, false);
2722
2723 return error;
2724 }
2725 EXPORT_SYMBOL(pci_prepare_to_sleep);
2726
2727 /**
2728 * pci_back_from_sleep - turn PCI device on during system-wide transition
2729 * into working state
2730 * @dev: Device to handle.
2731 *
2732 * Disable device's system wake-up capability and put it into D0.
2733 */
2734 int pci_back_from_sleep(struct pci_dev *dev)
2735 {
2736 int ret = pci_set_power_state(dev, PCI_D0);
2737
2738 if (ret)
2739 return ret;
2740
2741 pci_enable_wake(dev, PCI_D0, false);
2742 return 0;
2743 }
2744 EXPORT_SYMBOL(pci_back_from_sleep);
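
/*
 * Illustrative sketch: pci_prepare_to_sleep() and pci_back_from_sleep()
 * form a matched pair for drivers that let the PCI core choose the
 * target state:
 *
 *	suspend:	pci_save_state(pdev);
 *			pci_prepare_to_sleep(pdev);
 *
 *	resume:		pci_back_from_sleep(pdev);
 *			pci_restore_state(pdev);
 */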
2745
2746 /**
2747 * pci_finish_runtime_suspend - Carry out PCI-specific part of runtime suspend.
2748 * @dev: PCI device being suspended.
2749 *
2750 * Prepare @dev to generate wake-up events at run time and put it into a low
2751 * power state.
2752 */
2753 int pci_finish_runtime_suspend(struct pci_dev *dev)
2754 {
2755 pci_power_t target_state;
2756 int error;
2757
2758 target_state = pci_target_state(dev, device_can_wakeup(&dev->dev));
2759 if (target_state == PCI_POWER_ERROR)
2760 return -EIO;
2761
2762 __pci_enable_wake(dev, target_state, pci_dev_run_wake(dev));
2763
2764 error = pci_set_power_state(dev, target_state);
2765
2766 if (error)
2767 pci_enable_wake(dev, target_state, false);
2768
2769 return error;
2770 }
2771
2772 /**
2773 * pci_dev_run_wake - Check if device can generate run-time wake-up events.
2774 * @dev: Device to check.
2775 *
2776 * Return true if the device itself is capable of generating wake-up events
2777 * (through the platform or using the native PCIe PME) or if the device supports
2778 * PME and one of its upstream bridges can generate wake-up events.
2779 */
2780 bool pci_dev_run_wake(struct pci_dev *dev)
2781 {
2782 struct pci_bus *bus = dev->bus;
2783
2784 if (!dev->pme_support)
2785 return false;
2786
2787 /* PME-capable in principle, but not from the target power state */
2788 if (!pci_pme_capable(dev, pci_target_state(dev, true)))
2789 return false;
2790
2791 if (device_can_wakeup(&dev->dev))
2792 return true;
2793
2794 while (bus->parent) {
2795 struct pci_dev *bridge = bus->self;
2796
2797 if (device_can_wakeup(&bridge->dev))
2798 return true;
2799
2800 bus = bus->parent;
2801 }
2802
2803 /* We have reached the root bus. */
2804 if (bus->bridge)
2805 return device_can_wakeup(bus->bridge);
2806
2807 return false;
2808 }
2809 EXPORT_SYMBOL_GPL(pci_dev_run_wake);
2810
2811 /**
2812 * pci_dev_need_resume - Check if it is necessary to resume the device.
2813 * @pci_dev: Device to check.
2814 *
2815 * Return 'true' if the device is not runtime-suspended, if it has to be
2816 * reconfigured due to a difference in wakeup settings between system and
2817 * runtime suspend, or if its current power state is not suitable for the
2818 * upcoming (system-wide) transition.
2819 */
2820 bool pci_dev_need_resume(struct pci_dev *pci_dev)
2821 {
2822 struct device *dev = &pci_dev->dev;
2823 pci_power_t target_state;
2824
2825 if (!pm_runtime_suspended(dev) || platform_pci_need_resume(pci_dev))
2826 return true;
2827
2828 target_state = pci_target_state(pci_dev, device_may_wakeup(dev));
2829
2830 /*
2831 * If the earlier platform check has not triggered, D3cold is just power
2832 * removal on top of D3hot, so no need to resume the device in that
2833 * case.
2834 */
2835 return target_state != pci_dev->current_state &&
2836 target_state != PCI_D3cold &&
2837 pci_dev->current_state != PCI_D3hot;
2838 }
2839
2840 /**
2841 * pci_dev_adjust_pme - Adjust PME setting for a suspended device.
2842 * @pci_dev: Device to check.
2843 *
2844 * If the device is suspended and it is not configured for system wakeup,
2845 * disable PME for it to prevent it from waking up the system unnecessarily.
2846 *
2847 * Note that if the device's power state is D3cold and the platform check in
2848 * pci_dev_need_resume() has not triggered, the device's configuration need not
2849 * be changed.
2850 */
2851 void pci_dev_adjust_pme(struct pci_dev *pci_dev)
2852 {
2853 struct device *dev = &pci_dev->dev;
2854
2855 spin_lock_irq(&dev->power.lock);
2856
2857 if (pm_runtime_suspended(dev) && !device_may_wakeup(dev) &&
2858 pci_dev->current_state < PCI_D3cold)
2859 __pci_pme_active(pci_dev, false);
2860
2861 spin_unlock_irq(&dev->power.lock);
2862 }
2863
2864 /**
2865 * pci_dev_complete_resume - Finalize resume from system sleep for a device.
2866 * @pci_dev: Device to handle.
2867 *
2868 * If the device is runtime suspended and wakeup-capable, enable PME for it as
2869 * it might have been disabled during the prepare phase of system suspend if
2870 * the device was not configured for system wakeup.
2871 */
2872 void pci_dev_complete_resume(struct pci_dev *pci_dev)
2873 {
2874 struct device *dev = &pci_dev->dev;
2875
2876 if (!pci_dev_run_wake(pci_dev))
2877 return;
2878
2879 spin_lock_irq(&dev->power.lock);
2880
2881 if (pm_runtime_suspended(dev) && pci_dev->current_state < PCI_D3cold)
2882 __pci_pme_active(pci_dev, true);
2883
2884 spin_unlock_irq(&dev->power.lock);
2885 }
2886
2887 /**
2888 * pci_choose_state - Choose the power state of a PCI device.
2889 * @dev: Target PCI device.
2890 * @state: Target state for the whole system.
2891 *
2892 * Returns PCI power state suitable for @dev and @state.
2893 */
2894 pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
2895 {
2896 if (state.event == PM_EVENT_ON)
2897 return PCI_D0;
2898
2899 return pci_target_state(dev, false);
2900 }
2901 EXPORT_SYMBOL(pci_choose_state);
2902
2903 void pci_config_pm_runtime_get(struct pci_dev *pdev)
2904 {
2905 struct device *dev = &pdev->dev;
2906 struct device *parent = dev->parent;
2907
2908 if (parent)
2909 pm_runtime_get_sync(parent);
2910 pm_runtime_get_noresume(dev);
2911 /*
2912 * pdev->current_state is set to PCI_D3cold during suspending,
2913 * so wait until suspending completes
2914 */
2915 pm_runtime_barrier(dev);
2916 /*
2917 * Only need to resume devices in D3cold, because config
2918 * registers are still accessible for devices suspended but
2919 * not in D3cold.
2920 */
2921 if (pdev->current_state == PCI_D3cold)
2922 pm_runtime_resume(dev);
2923 }
2924
2925 void pci_config_pm_runtime_put(struct pci_dev *pdev)
2926 {
2927 struct device *dev = &pdev->dev;
2928 struct device *parent = dev->parent;
2929
2930 pm_runtime_put(dev);
2931 if (parent)
2932 pm_runtime_put_sync(parent);
2933 }
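
/*
 * Illustrative sketch: code that touches config space outside of a
 * runtime-PM-covered path (sysfs access, for example) brackets the
 * access with the get/put pair above:
 *
 *	pci_config_pm_runtime_get(pdev);
 *	pci_read_config_dword(pdev, pos, &val);
 *	pci_config_pm_runtime_put(pdev);
 */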
2934
2935 static const struct dmi_system_id bridge_d3_blacklist[] = {
2936 #ifdef CONFIG_X86
2937 {
2938 /*
2939 * Gigabyte X299 root port is not marked as hotplug capable
2940 * which allows Linux to power manage it. However, this
2941 * confuses the BIOS SMI handler so don't power manage root
2942 * ports on that system.
2943 */
2944 .ident = "X299 DESIGNARE EX-CF",
2945 .matches = {
2946 DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
2947 DMI_MATCH(DMI_BOARD_NAME, "X299 DESIGNARE EX-CF"),
2948 },
2949 },
2950 {
2951 /*
2952 * Downstream device is not accessible after putting a root port
2953 * into D3cold and back into D0 on Elo Continental Z2 board
2954 */
2955 .ident = "Elo Continental Z2",
2956 .matches = {
2957 DMI_MATCH(DMI_BOARD_VENDOR, "Elo Touch Solutions"),
2958 DMI_MATCH(DMI_BOARD_NAME, "Geminilake"),
2959 DMI_MATCH(DMI_BOARD_VERSION, "Continental Z2"),
2960 },
2961 },
2962 {
2963 /*
2964 * Changing the power state of the root port that the dGPU is connected to fails
2965 * https://gitlab.freedesktop.org/drm/amd/-/issues/3229
2966 */
2967 .ident = "Hewlett-Packard HP Pavilion 17 Notebook PC/1972",
2968 .matches = {
2969 DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
2970 DMI_MATCH(DMI_BOARD_NAME, "1972"),
2971 DMI_MATCH(DMI_BOARD_VERSION, "95.33"),
2972 },
2973 },
2974 #endif
2975 { }
2976 };
2977
2978 /**
2979 * pci_bridge_d3_possible - Is it possible to put the bridge into D3
2980 * @bridge: Bridge to check
2981 *
2982 * Currently we only allow D3 for some PCIe ports and for Thunderbolt.
2983 *
2984 * Return: Whether it is possible to move the bridge to D3.
2985 *
2986 * The return value is guaranteed to be constant across the entire lifetime
2987 * of the bridge, including its hot-removal.
2988 */
2989 bool pci_bridge_d3_possible(struct pci_dev *bridge)
2990 {
2991 if (!pci_is_pcie(bridge))
2992 return false;
2993
2994 switch (pci_pcie_type(bridge)) {
2995 case PCI_EXP_TYPE_ROOT_PORT:
2996 case PCI_EXP_TYPE_UPSTREAM:
2997 case PCI_EXP_TYPE_DOWNSTREAM:
2998 if (pci_bridge_d3_disable)
2999 return false;
3000
3001 /*
3002 * Hotplug ports handled by platform firmware may not be put
3003 * into D3 by the OS, e.g. ACPI slots ...
3004 */
3005 if (bridge->is_hotplug_bridge && !bridge->is_pciehp)
3006 return false;
3007
3008 /* ... or PCIe hotplug ports not handled natively by the OS. */
3009 if (bridge->is_pciehp && !pciehp_is_native(bridge))
3010 return false;
3011
3012 if (pci_bridge_d3_force)
3013 return true;
3014
3015 /* Even the oldest 2010 Thunderbolt controller supports D3. */
3016 if (bridge->is_thunderbolt)
3017 return true;
3018
3019 /* Platform might know better if the bridge supports D3 */
3020 if (platform_pci_bridge_d3(bridge))
3021 return true;
3022
3023 /*
3024 * Hotplug ports handled natively by the OS were not validated
3025 * by vendors for runtime D3 at least until 2018 because there
3026 * was no OS support.
3027 */
3028 if (bridge->is_pciehp)
3029 return false;
3030
3031 if (dmi_check_system(bridge_d3_blacklist))
3032 return false;
3033
3034 /*
3035 * Out of caution, we only allow PCIe ports from 2015 or newer
3036 * into D3 on x86.
3037 */
3038 if (!IS_ENABLED(CONFIG_X86) || dmi_get_bios_year() >= 2015)
3039 return true;
3040 break;
3041 }
3042
3043 return false;
3044 }
3045
3046 static int pci_dev_check_d3cold(struct pci_dev *dev, void *data)
3047 {
3048 bool *d3cold_ok = data;
3049
3050 if (/* The device needs to be allowed to go D3cold ... */
3051 dev->no_d3cold || !dev->d3cold_allowed ||
3052
3053 /* ... and if it is wakeup capable to do so from D3cold. */
3054 (device_may_wakeup(&dev->dev) &&
3055 !pci_pme_capable(dev, PCI_D3cold)) ||
3056
3057 /* If it is a bridge it must be allowed to go to D3. */
3058 !pci_power_manageable(dev))
3059
3060 *d3cold_ok = false;
3061
3062 return !*d3cold_ok;
3063 }
3064
3065 /*
3066 * pci_bridge_d3_update - Update bridge D3 capabilities
3067 * @dev: PCI device which is changed
3068 *
3069 * Update the upstream bridge's PM capabilities depending on whether the
3070 * device's PM configuration was changed or the device is being removed. The
3071 * change is also propagated upstream.
3072 */
3073 void pci_bridge_d3_update(struct pci_dev *dev)
3074 {
3075 bool remove = !device_is_registered(&dev->dev);
3076 struct pci_dev *bridge;
3077 bool d3cold_ok = true;
3078
3079 bridge = pci_upstream_bridge(dev);
3080 if (!bridge || !pci_bridge_d3_possible(bridge))
3081 return;
3082
3083 /*
3084 * If D3 is currently allowed for the bridge, removing one of its
3085 * children won't change that.
3086 */
3087 if (remove && bridge->bridge_d3)
3088 return;
3089
3090 /*
3091 * If D3 is currently allowed for the bridge and a child is added or
3092 * changed, disallowance of D3 can only be caused by that child, so
3093 * we only need to check that single device, not any of its siblings.
3094 *
3095 * If D3 is currently not allowed for the bridge, checking the device
3096 * first may allow us to skip checking its siblings.
3097 */
3098 if (!remove)
3099 pci_dev_check_d3cold(dev, &d3cold_ok);
3100
3101 /*
3102 * If D3 is currently not allowed for the bridge, this may be caused
3103 * either by the device being changed/removed or any of its siblings,
3104 * so we need to go through all children to find out if one of them
3105 * continues to block D3.
3106 */
3107 if (d3cold_ok && !bridge->bridge_d3)
3108 pci_walk_bus(bridge->subordinate, pci_dev_check_d3cold,
3109 &d3cold_ok);
3110
3111 if (bridge->bridge_d3 != d3cold_ok) {
3112 bridge->bridge_d3 = d3cold_ok;
3113 /* Propagate change to upstream bridges */
3114 pci_bridge_d3_update(bridge);
3115 }
3116 }
3117
3118 /**
3119 * pci_d3cold_enable - Enable D3cold for device
3120 * @dev: PCI device to handle
3121 *
3122 * This function can be used by drivers to enable D3cold for the device
3123 * they handle. It also updates upstream PCI bridge PM capabilities
3124 * accordingly.
3125 */
3126 void pci_d3cold_enable(struct pci_dev *dev)
3127 {
3128 if (dev->no_d3cold) {
3129 dev->no_d3cold = false;
3130 pci_bridge_d3_update(dev);
3131 }
3132 }
3133 EXPORT_SYMBOL_GPL(pci_d3cold_enable);
3134
3135 /**
3136 * pci_d3cold_disable - Disable D3cold for device
3137 * @dev: PCI device to handle
3138 *
3139 * This function can be used by drivers to disable D3cold for the device
3140 * they handle. It also updates upstream PCI bridge PM capabilities
3141 * accordingly.
3142 */
3143 void pci_d3cold_disable(struct pci_dev *dev)
3144 {
3145 if (!dev->no_d3cold) {
3146 dev->no_d3cold = true;
3147 pci_bridge_d3_update(dev);
3148 }
3149 }
3150 EXPORT_SYMBOL_GPL(pci_d3cold_disable);
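
/*
 * Illustrative sketch: a driver that temporarily cannot tolerate power
 * removal (while flashing firmware, for instance) may bracket the
 * critical section:
 *
 *	pci_d3cold_disable(pdev);
 *	...
 *	pci_d3cold_enable(pdev);
 */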
3151
3152 void pci_pm_power_up_and_verify_state(struct pci_dev *pci_dev)
3153 {
3154 pci_power_up(pci_dev);
3155 pci_update_current_state(pci_dev, PCI_D0);
3156 }
3157
3158 /**
3159 * pci_pm_init - Initialize PM functions of given PCI device
3160 * @dev: PCI device to handle.
3161 */
3162 void pci_pm_init(struct pci_dev *dev)
3163 {
3164 int pm;
3165 u16 pmc;
3166
3167 device_enable_async_suspend(&dev->dev);
3168 dev->wakeup_prepared = false;
3169
3170 dev->pm_cap = 0;
3171 dev->pme_support = 0;
3172
3173 /* find PCI PM capability in list */
3174 pm = pci_find_capability(dev, PCI_CAP_ID_PM);
3175 if (!pm)
3176 goto poweron;
3177 /* Check device's ability to generate PME# */
3178 pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);
3179
3180 if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
3181 pci_err(dev, "unsupported PM cap regs version (%u)\n",
3182 pmc & PCI_PM_CAP_VER_MASK);
3183 goto poweron;
3184 }
3185
3186 dev->pm_cap = pm;
3187 dev->d3hot_delay = PCI_PM_D3HOT_WAIT;
3188 dev->d3cold_delay = PCI_PM_D3COLD_WAIT;
3189 dev->bridge_d3 = pci_bridge_d3_possible(dev);
3190 dev->d3cold_allowed = true;
3191
3192 dev->d1_support = false;
3193 dev->d2_support = false;
3194 if (!pci_no_d1d2(dev)) {
3195 if (pmc & PCI_PM_CAP_D1)
3196 dev->d1_support = true;
3197 if (pmc & PCI_PM_CAP_D2)
3198 dev->d2_support = true;
3199
3200 if (dev->d1_support || dev->d2_support)
3201 pci_info(dev, "supports%s%s\n",
3202 dev->d1_support ? " D1" : "",
3203 dev->d2_support ? " D2" : "");
3204 }
3205
3206 pmc &= PCI_PM_CAP_PME_MASK;
3207 if (pmc) {
3208 pci_info(dev, "PME# supported from%s%s%s%s%s\n",
3209 (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
3210 (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
3211 (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
3212 (pmc & PCI_PM_CAP_PME_D3hot) ? " D3hot" : "",
3213 (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : "");
3214 dev->pme_support = FIELD_GET(PCI_PM_CAP_PME_MASK, pmc);
3215 dev->pme_poll = true;
3216 /*
3217 * Make device's PM flags reflect the wake-up capability, but
3218 * let user space enable it to wake up the system as needed.
3219 */
3220 device_set_wakeup_capable(&dev->dev, true);
3221 /* Disable the PME# generation functionality */
3222 pci_pme_active(dev, false);
3223 }
3224
3225 poweron:
3226 pci_pm_power_up_and_verify_state(dev);
3227 pm_runtime_forbid(&dev->dev);
3228 pm_runtime_set_active(&dev->dev);
3229 pm_runtime_enable(&dev->dev);
3230 }
3231
3232 static unsigned long pci_ea_flags(struct pci_dev *dev, u8 prop)
3233 {
3234 unsigned long flags = IORESOURCE_PCI_FIXED | IORESOURCE_PCI_EA_BEI;
3235
3236 switch (prop) {
3237 case PCI_EA_P_MEM:
3238 case PCI_EA_P_VF_MEM:
3239 flags |= IORESOURCE_MEM;
3240 break;
3241 case PCI_EA_P_MEM_PREFETCH:
3242 case PCI_EA_P_VF_MEM_PREFETCH:
3243 flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH;
3244 break;
3245 case PCI_EA_P_IO:
3246 flags |= IORESOURCE_IO;
3247 break;
3248 default:
3249 return 0;
3250 }
3251
3252 return flags;
3253 }
3254
3255 static struct resource *pci_ea_get_resource(struct pci_dev *dev, u8 bei,
3256 u8 prop)
3257 {
3258 if (bei <= PCI_EA_BEI_BAR5 && prop <= PCI_EA_P_IO)
3259 return &dev->resource[bei];
3260 #ifdef CONFIG_PCI_IOV
3261 else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5 &&
3262 (prop == PCI_EA_P_VF_MEM || prop == PCI_EA_P_VF_MEM_PREFETCH))
3263 return &dev->resource[PCI_IOV_RESOURCES +
3264 bei - PCI_EA_BEI_VF_BAR0];
3265 #endif
3266 else if (bei == PCI_EA_BEI_ROM)
3267 return &dev->resource[PCI_ROM_RESOURCE];
3268 else
3269 return NULL;
3270 }
3271
3272 /* Read an Enhanced Allocation (EA) entry */
3273 static int pci_ea_read(struct pci_dev *dev, int offset)
3274 {
3275 struct resource *res;
3276 const char *res_name;
3277 int ent_size, ent_offset = offset;
3278 resource_size_t start, end;
3279 unsigned long flags;
3280 u32 dw0, bei, base, max_offset;
3281 u8 prop;
3282 bool support_64 = (sizeof(resource_size_t) >= 8);
3283
3284 pci_read_config_dword(dev, ent_offset, &dw0);
3285 ent_offset += 4;
3286
3287 /* Entry size field indicates DWORDs after 1st */
3288 ent_size = (FIELD_GET(PCI_EA_ES, dw0) + 1) << 2;
3289
3290 if (!(dw0 & PCI_EA_ENABLE)) /* Entry not enabled */
3291 goto out;
3292
3293 bei = FIELD_GET(PCI_EA_BEI, dw0);
3294 prop = FIELD_GET(PCI_EA_PP, dw0);
3295
3296 /*
3297 * If the Property is in the reserved range, try the Secondary
3298 * Property instead.
3299 */
3300 if (prop > PCI_EA_P_BRIDGE_IO && prop < PCI_EA_P_MEM_RESERVED)
3301 prop = FIELD_GET(PCI_EA_SP, dw0);
3302 if (prop > PCI_EA_P_BRIDGE_IO)
3303 goto out;
3304
3305 res = pci_ea_get_resource(dev, bei, prop);
3306 res_name = pci_resource_name(dev, bei);
3307 if (!res) {
3308 pci_err(dev, "Unsupported EA entry BEI: %u\n", bei);
3309 goto out;
3310 }
3311
3312 flags = pci_ea_flags(dev, prop);
3313 if (!flags) {
3314 pci_err(dev, "Unsupported EA properties: %#x\n", prop);
3315 goto out;
3316 }
3317
3318 /* Read Base */
3319 pci_read_config_dword(dev, ent_offset, &base);
3320 start = (base & PCI_EA_FIELD_MASK);
3321 ent_offset += 4;
3322
3323 /* Read MaxOffset */
3324 pci_read_config_dword(dev, ent_offset, &max_offset);
3325 ent_offset += 4;
3326
3327 /* Read Base MSBs (if 64-bit entry) */
3328 if (base & PCI_EA_IS_64) {
3329 u32 base_upper;
3330
3331 pci_read_config_dword(dev, ent_offset, &base_upper);
3332 ent_offset += 4;
3333
3334 flags |= IORESOURCE_MEM_64;
3335
3336 /* entry starts above 32-bit boundary, can't use */
3337 if (!support_64 && base_upper)
3338 goto out;
3339
3340 if (support_64)
3341 start |= ((u64)base_upper << 32);
3342 }
3343
3344 end = start + (max_offset | 0x03);
3345
3346 /* Read MaxOffset MSBs (if 64-bit entry) */
3347 if (max_offset & PCI_EA_IS_64) {
3348 u32 max_offset_upper;
3349
3350 pci_read_config_dword(dev, ent_offset, &max_offset_upper);
3351 ent_offset += 4;
3352
3353 flags |= IORESOURCE_MEM_64;
3354
3355 /* entry too big, can't use */
3356 if (!support_64 && max_offset_upper)
3357 goto out;
3358
3359 if (support_64)
3360 end += ((u64)max_offset_upper << 32);
3361 }
3362
3363 if (end < start) {
3364 pci_err(dev, "EA Entry crosses address boundary\n");
3365 goto out;
3366 }
3367
3368 if (ent_size != ent_offset - offset) {
3369 pci_err(dev, "EA Entry Size (%d) does not match length read (%d)\n",
3370 ent_size, ent_offset - offset);
3371 goto out;
3372 }
3373
3374 res->name = pci_name(dev);
3375 res->start = start;
3376 res->end = end;
3377 res->flags = flags;
3378
3379 if (bei <= PCI_EA_BEI_BAR5)
3380 pci_info(dev, "%s %pR: from Enhanced Allocation, properties %#02x\n",
3381 res_name, res, prop);
3382 else if (bei == PCI_EA_BEI_ROM)
3383 pci_info(dev, "%s %pR: from Enhanced Allocation, properties %#02x\n",
3384 res_name, res, prop);
3385 else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5)
3386 pci_info(dev, "%s %pR: from Enhanced Allocation, properties %#02x\n",
3387 res_name, res, prop);
3388 else
3389 pci_info(dev, "BEI %d %pR: from Enhanced Allocation, properties %#02x\n",
3390 bei, res, prop);
3391
3392 out:
3393 return offset + ent_size;
3394 }
3395
3396 /* Enhanced Allocation Initialization */
3397 void pci_ea_init(struct pci_dev *dev)
3398 {
3399 int ea;
3400 u8 num_ent;
3401 int offset;
3402 int i;
3403
3404 /* find PCI EA capability in list */
3405 ea = pci_find_capability(dev, PCI_CAP_ID_EA);
3406 if (!ea)
3407 return;
3408
3409 /* determine the number of entries */
3410 pci_bus_read_config_byte(dev->bus, dev->devfn, ea + PCI_EA_NUM_ENT,
3411 &num_ent);
3412 num_ent &= PCI_EA_NUM_ENT_MASK;
3413
3414 offset = ea + PCI_EA_FIRST_ENT;
3415
3416 /* Skip DWORD 2 for type 1 functions */
3417 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
3418 offset += 4;
3419
3420 /* parse each EA entry */
3421 for (i = 0; i < num_ent; ++i)
3422 offset = pci_ea_read(dev, offset);
3423 }
3424
3425 static void pci_add_saved_cap(struct pci_dev *pci_dev,
3426 struct pci_cap_saved_state *new_cap)
3427 {
3428 hlist_add_head(&new_cap->next, &pci_dev->saved_cap_space);
3429 }
3430
3431 /**
3432 * _pci_add_cap_save_buffer - allocate buffer for saving given
3433 * capability registers
3434 * @dev: the PCI device
3435 * @cap: the capability to allocate the buffer for
3436 * @extended: Standard or Extended capability ID
3437 * @size: requested size of the buffer
3438 */
3439 static int _pci_add_cap_save_buffer(struct pci_dev *dev, u16 cap,
3440 bool extended, unsigned int size)
3441 {
3442 int pos;
3443 struct pci_cap_saved_state *save_state;
3444
3445 if (extended)
3446 pos = pci_find_ext_capability(dev, cap);
3447 else
3448 pos = pci_find_capability(dev, cap);
3449
3450 if (!pos)
3451 return 0;
3452
3453 save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL);
3454 if (!save_state)
3455 return -ENOMEM;
3456
3457 save_state->cap.cap_nr = cap;
3458 save_state->cap.cap_extended = extended;
3459 save_state->cap.size = size;
3460 pci_add_saved_cap(dev, save_state);
3461
3462 return 0;
3463 }
3464
3465 int pci_add_cap_save_buffer(struct pci_dev *dev, char cap, unsigned int size)
3466 {
3467 return _pci_add_cap_save_buffer(dev, cap, false, size);
3468 }
3469
3470 int pci_add_ext_cap_save_buffer(struct pci_dev *dev, u16 cap, unsigned int size)
3471 {
3472 return _pci_add_cap_save_buffer(dev, cap, true, size);
3473 }
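
/*
 * Illustrative sketch: a capability save buffer is allocated once at
 * device setup and looked up again at save/restore time:
 *
 *	pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16));
 *	...
 *	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
 */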
3474
3475 /**
3476 * pci_allocate_cap_save_buffers - allocate buffers for saving capabilities
3477 * @dev: the PCI device
3478 */
3479 void pci_allocate_cap_save_buffers(struct pci_dev *dev)
3480 {
3481 int error;
3482
3483 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP,
3484 PCI_EXP_SAVE_REGS * sizeof(u16));
3485 if (error)
3486 pci_err(dev, "unable to preallocate PCI Express save buffer\n");
3487
3488 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16));
3489 if (error)
3490 pci_err(dev, "unable to preallocate PCI-X save buffer\n");
3491
3492 error = pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_LTR,
3493 2 * sizeof(u16));
3494 if (error)
3495 pci_err(dev, "unable to allocate suspend buffer for LTR\n");
3496
3497 pci_allocate_vc_save_buffers(dev);
3498 }
3499
3500 void pci_free_cap_save_buffers(struct pci_dev *dev)
3501 {
3502 struct pci_cap_saved_state *tmp;
3503 struct hlist_node *n;
3504
3505 hlist_for_each_entry_safe(tmp, n, &dev->saved_cap_space, next)
3506 kfree(tmp);
3507 }
3508
3509 /**
3510 * pci_configure_ari - enable or disable ARI forwarding
3511 * @dev: the PCI device
3512 *
3513 * If @dev and its upstream bridge both support ARI, enable ARI in the
3514 * bridge. Otherwise, disable ARI in the bridge.
3515 */
3516 void pci_configure_ari(struct pci_dev *dev)
3517 {
3518 u32 cap;
3519 struct pci_dev *bridge;
3520
3521 if (pcie_ari_disabled || !pci_is_pcie(dev) || dev->devfn)
3522 return;
3523
3524 bridge = dev->bus->self;
3525 if (!bridge)
3526 return;
3527
3528 pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);
3529 if (!(cap & PCI_EXP_DEVCAP2_ARI))
3530 return;
3531
3532 if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI)) {
3533 pcie_capability_set_word(bridge, PCI_EXP_DEVCTL2,
3534 PCI_EXP_DEVCTL2_ARI);
3535 bridge->ari_enabled = 1;
3536 } else {
3537 pcie_capability_clear_word(bridge, PCI_EXP_DEVCTL2,
3538 PCI_EXP_DEVCTL2_ARI);
3539 bridge->ari_enabled = 0;
3540 }
3541 }
3542
3543 static bool pci_acs_flags_enabled(struct pci_dev *pdev, u16 acs_flags)
3544 {
3545 int pos;
3546 u16 cap, ctrl;
3547
3548 pos = pdev->acs_cap;
3549 if (!pos)
3550 return false;
3551
3552 /*
3553 * Except for egress control, capabilities are either required
3554 * or only required if controllable. Features missing from the
3555 * capability field can therefore be assumed to be hard-wired enabled.
3556 */
3557 pci_read_config_word(pdev, pos + PCI_ACS_CAP, &cap);
3558 acs_flags &= (cap | PCI_ACS_EC);
3559
3560 pci_read_config_word(pdev, pos + PCI_ACS_CTRL, &ctrl);
3561 return (ctrl & acs_flags) == acs_flags;
3562 }
3563
3564 /**
3565 * pci_acs_enabled - test ACS against required flags for a given device
3566 * @pdev: device to test
3567 * @acs_flags: required PCI ACS flags
3568 *
3569 * Return true if the device supports the provided flags. Automatically
3570 * filters out flags that are not implemented on multifunction devices.
3571 *
3572 * Note that this interface checks the effective ACS capabilities of the
3573 * device rather than the actual capabilities. For instance, most single
3574 * function endpoints are not required to support ACS because they have no
3575 * opportunity for peer-to-peer access. We therefore return 'true'
3576 * regardless of whether the device exposes an ACS capability. This makes
3577 * it much easier for callers of this function to ignore the actual type
3578 * or topology of the device when testing ACS support.
3579 */
3580 bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags)
3581 {
3582 int ret;
3583
3584 ret = pci_dev_specific_acs_enabled(pdev, acs_flags);
3585 if (ret >= 0)
3586 return ret > 0;
3587
3588 /*
3589 * Conventional PCI and PCI-X devices never support ACS, either
3590 * effectively or actually. The shared bus topology implies that
3591 * any device on the bus can receive or snoop DMA.
3592 */
3593 if (!pci_is_pcie(pdev))
3594 return false;
3595
3596 switch (pci_pcie_type(pdev)) {
3597 /*
3598 * PCI/X-to-PCIe bridges are not specifically mentioned by the spec,
3599 * but since their primary interface is PCI/X, we conservatively
3600 * handle them as we would a non-PCIe device.
3601 */
3602 case PCI_EXP_TYPE_PCIE_BRIDGE:
3603 /*
3604 * PCIe 3.0, 6.12.1 excludes ACS on these devices. "ACS is never
3605 * applicable... must never implement an ACS Extended Capability...".
3606 * This seems arbitrary, but we take a conservative interpretation
3607 * of this statement.
3608 */
3609 case PCI_EXP_TYPE_PCI_BRIDGE:
3610 case PCI_EXP_TYPE_RC_EC:
3611 return false;
3612 /*
3613 * PCIe 3.0, 6.12.1.1 specifies that downstream and root ports should
3614 * implement ACS in order to indicate their peer-to-peer capabilities,
3615 * regardless of whether they are single- or multi-function devices.
3616 */
3617 case PCI_EXP_TYPE_DOWNSTREAM:
3618 case PCI_EXP_TYPE_ROOT_PORT:
3619 return pci_acs_flags_enabled(pdev, acs_flags);
3620 /*
3621 * PCIe 3.0, 6.12.1.2 specifies ACS capabilities that should be
3622 * implemented by the remaining PCIe types to indicate peer-to-peer
3623 * capabilities, but only when they are part of a multifunction
3624 * device. The footnote for section 6.12 indicates the specific
3625 * PCIe types included here.
3626 */
3627 case PCI_EXP_TYPE_ENDPOINT:
3628 case PCI_EXP_TYPE_UPSTREAM:
3629 case PCI_EXP_TYPE_LEG_END:
3630 case PCI_EXP_TYPE_RC_END:
3631 if (!pdev->multifunction)
3632 break;
3633
3634 return pci_acs_flags_enabled(pdev, acs_flags);
3635 }
3636
3637 /*
3638 * PCIe 3.0, 6.12.1.3 specifies no ACS capabilities are applicable
3639 * to single function devices with the exception of downstream ports.
3640 */
3641 return true;
3642 }
3643
3644 /**
3645 * pci_acs_path_enabled - test ACS flags from start to end in a hierarchy
3646 * @start: starting downstream device
3647 * @end: ending upstream device or NULL to search to the root bus
3648 * @acs_flags: required flags
3649 *
3650 * Walk up a device tree from start to end testing PCI ACS support. If
3651 * any step along the way does not support the required flags, return false.
3652 */
3653 bool pci_acs_path_enabled(struct pci_dev *start,
3654 struct pci_dev *end, u16 acs_flags)
3655 {
3656 struct pci_dev *pdev, *parent = start;
3657
3658 do {
3659 pdev = parent;
3660
3661 if (!pci_acs_enabled(pdev, acs_flags))
3662 return false;
3663
3664 if (pci_is_root_bus(pdev->bus))
3665 return (end == NULL);
3666
3667 parent = pdev->bus->self;
3668 } while (pdev != end);
3669
3670 return true;
3671 }
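
/*
 * Example (editorial sketch, not in the original source): the kind of
 * check IOMMU grouping code performs with pci_acs_path_enabled() to
 * decide whether a device is isolated from its peers. The flag set and
 * the helper name below are illustrative only.
 */
static bool example_dev_is_isolated(struct pci_dev *pdev)
{
	u16 flags = PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CF | PCI_ACS_UF;

	/* NULL end: walk all the way up to the root bus */
	return pci_acs_path_enabled(pdev, NULL, flags);
}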
3672
3673 /**
3674 * pci_acs_init - Initialize ACS if hardware supports it
3675 * @dev: the PCI device
3676 */
3677 void pci_acs_init(struct pci_dev *dev)
3678 {
3679 dev->acs_cap = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
3680
3681 /*
3682 * Attempt to enable ACS regardless of capability because some Root
3683 * Ports (e.g. those quirked with *_intel_pch_acs_*) do not have
3684 * the standard ACS capability but still support ACS via those
3685 * quirks.
3686 */
3687 pci_enable_acs(dev);
3688 }
3689
3690 void pci_rebar_init(struct pci_dev *pdev)
3691 {
3692 pdev->rebar_cap = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_REBAR);
3693 }
3694
3695 /**
3696 * pci_rebar_find_pos - find position of resize ctrl reg for BAR
3697 * @pdev: PCI device
3698 * @bar: BAR to find
3699 *
3700 * Helper to find the position of the ctrl register for a BAR.
3701 * Returns -ENOTSUPP if resizable BARs are not supported at all.
3702 * Returns -ENOENT if no ctrl register for the BAR could be found.
3703 */
3704 static int pci_rebar_find_pos(struct pci_dev *pdev, int bar)
3705 {
3706 unsigned int pos, nbars, i;
3707 u32 ctrl;
3708
3709 if (pci_resource_is_iov(bar)) {
3710 pos = pci_iov_vf_rebar_cap(pdev);
3711 bar = pci_resource_num_to_vf_bar(bar);
3712 } else {
3713 pos = pdev->rebar_cap;
3714 }
3715
3716 if (!pos)
3717 return -ENOTSUPP;
3718
3719 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3720 nbars = FIELD_GET(PCI_REBAR_CTRL_NBAR_MASK, ctrl);
3721
3722 for (i = 0; i < nbars; i++, pos += 8) {
3723 int bar_idx;
3724
3725 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3726 bar_idx = FIELD_GET(PCI_REBAR_CTRL_BAR_IDX, ctrl);
3727 if (bar_idx == bar)
3728 return pos;
3729 }
3730
3731 return -ENOENT;
3732 }
3733
3734 /**
3735 * pci_rebar_get_possible_sizes - get possible sizes for BAR
3736 * @pdev: PCI device
3737 * @bar: BAR to query
3738 *
3739 * Get the possible sizes of a resizable BAR as bitmask defined in the spec
3740 * (bit 0=1MB, bit 31=128TB). Returns 0 if BAR isn't resizable.
3741 */
3742 u32 pci_rebar_get_possible_sizes(struct pci_dev *pdev, int bar)
3743 {
3744 int pos;
3745 u32 cap;
3746
3747 pos = pci_rebar_find_pos(pdev, bar);
3748 if (pos < 0)
3749 return 0;
3750
3751 pci_read_config_dword(pdev, pos + PCI_REBAR_CAP, &cap);
3752 cap = FIELD_GET(PCI_REBAR_CAP_SIZES, cap);
3753
3754 /* Sapphire RX 5600 XT Pulse has an invalid cap dword for BAR 0 */
3755 if (pdev->vendor == PCI_VENDOR_ID_ATI && pdev->device == 0x731f &&
3756 bar == 0 && cap == 0x700)
3757 return 0x3f00;
3758
3759 return cap;
3760 }
3761 EXPORT_SYMBOL(pci_rebar_get_possible_sizes);
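
/*
 * Example (editorial sketch): decoding the bitmask returned above. Bit n
 * set means the BAR supports a size of 1 MB << n, so bit 0 is 1 MB and
 * bit 31 is 128 TB. The helper below is hypothetical and only
 * illustrates the arithmetic; SZ_1M from <linux/sizes.h> is assumed.
 */
static u64 example_rebar_largest_size(u32 sizes)
{
	if (!sizes)
		return 0;

	/* __fls() returns the index of the most significant set bit */
	return (u64)SZ_1M << __fls(sizes);
}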
3762
3763 /**
3764 * pci_rebar_get_current_size - get the current size of a BAR
3765 * @pdev: PCI device
3766 * @bar: BAR to query
3767 *
3768 * Read the size of a BAR from the resizable BAR config.
3769 * Returns size if found or negative error code.
3770 */
3771 int pci_rebar_get_current_size(struct pci_dev *pdev, int bar)
3772 {
3773 int pos;
3774 u32 ctrl;
3775
3776 pos = pci_rebar_find_pos(pdev, bar);
3777 if (pos < 0)
3778 return pos;
3779
3780 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3781 return FIELD_GET(PCI_REBAR_CTRL_BAR_SIZE, ctrl);
3782 }
3783
3784 /**
3785 * pci_rebar_set_size - set a new size for a BAR
3786 * @pdev: PCI device
3787 * @bar: BAR to set size to
3788 * @size: new size as defined in the spec (0=1MB, 31=128TB)
3789 *
3790 * Set the new size of a BAR as defined in the spec.
3791 * Returns zero if resizing was successful, error code otherwise.
3792 */
3793 int pci_rebar_set_size(struct pci_dev *pdev, int bar, int size)
3794 {
3795 int pos;
3796 u32 ctrl;
3797
3798 pos = pci_rebar_find_pos(pdev, bar);
3799 if (pos < 0)
3800 return pos;
3801
3802 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3803 ctrl &= ~PCI_REBAR_CTRL_BAR_SIZE;
3804 ctrl |= FIELD_PREP(PCI_REBAR_CTRL_BAR_SIZE, size);
3805 pci_write_config_dword(pdev, pos + PCI_REBAR_CTRL, ctrl);
3806 return 0;
3807 }
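
/*
 * Example (editorial sketch): a minimal resize flow combining the two
 * helpers above. A real driver must also release and reallocate the BAR
 * resource around the resize (see pci_resize_resource()); that step is
 * omitted here, and the function name is illustrative.
 */
static int example_resize_bar0(struct pci_dev *pdev)
{
	u32 sizes = pci_rebar_get_possible_sizes(pdev, 0);

	if (!sizes)
		return -ENOTSUPP;	/* BAR 0 is not resizable */

	/* Program the largest supported encoded size (0=1MB ... 31=128TB) */
	return pci_rebar_set_size(pdev, 0, __fls(sizes));
}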
3808
3809 /**
3810 * pci_enable_atomic_ops_to_root - enable AtomicOp requests to root port
3811 * @dev: the PCI device
3812 * @cap_mask: mask of desired AtomicOp sizes, including one or more of:
3813 * PCI_EXP_DEVCAP2_ATOMIC_COMP32
3814 * PCI_EXP_DEVCAP2_ATOMIC_COMP64
3815 * PCI_EXP_DEVCAP2_ATOMIC_COMP128
3816 *
3817 * Return 0 if all upstream bridges support AtomicOp routing, egress
3818 * blocking is disabled on all upstream ports, and the root port supports
3819 * the requested completion capabilities (32-bit, 64-bit and/or 128-bit
3820 * AtomicOp completion), or negative otherwise.
3821 */
3822 int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask)
3823 {
3824 struct pci_bus *bus = dev->bus;
3825 struct pci_dev *bridge;
3826 u32 cap, ctl2;
3827
3828 /*
3829 * Per PCIe r5.0, sec 9.3.5.10, the AtomicOp Requester Enable bit
3830 * in Device Control 2 is reserved in VFs and the PF value applies
3831 * to all associated VFs.
3832 */
3833 if (dev->is_virtfn)
3834 return -EINVAL;
3835
3836 if (!pci_is_pcie(dev))
3837 return -EINVAL;
3838
3839 /*
3840 * Per PCIe r4.0, sec 6.15, endpoints and root ports may be
3841 * AtomicOp requesters. For now, we only support endpoints as
3842 * requesters and root ports as completers. No endpoints as
3843 * completers, and no peer-to-peer.
3844 */
3845
3846 switch (pci_pcie_type(dev)) {
3847 case PCI_EXP_TYPE_ENDPOINT:
3848 case PCI_EXP_TYPE_LEG_END:
3849 case PCI_EXP_TYPE_RC_END:
3850 break;
3851 default:
3852 return -EINVAL;
3853 }
3854
3855 while (bus->parent) {
3856 bridge = bus->self;
3857
3858 pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);
3859
3860 switch (pci_pcie_type(bridge)) {
3861 /* Ensure switch ports support AtomicOp routing */
3862 case PCI_EXP_TYPE_UPSTREAM:
3863 case PCI_EXP_TYPE_DOWNSTREAM:
3864 if (!(cap & PCI_EXP_DEVCAP2_ATOMIC_ROUTE))
3865 return -EINVAL;
3866 break;
3867
3868 /* Ensure root port supports all the sizes we care about */
3869 case PCI_EXP_TYPE_ROOT_PORT:
3870 if ((cap & cap_mask) != cap_mask)
3871 return -EINVAL;
3872 break;
3873 }
3874
3875 /* Ensure upstream ports don't block AtomicOps on egress */
3876 if (pci_pcie_type(bridge) == PCI_EXP_TYPE_UPSTREAM) {
3877 pcie_capability_read_dword(bridge, PCI_EXP_DEVCTL2,
3878 &ctl2);
3879 if (ctl2 & PCI_EXP_DEVCTL2_ATOMIC_EGRESS_BLOCK)
3880 return -EINVAL;
3881 }
3882
3883 bus = bus->parent;
3884 }
3885
3886 pcie_capability_set_word(dev, PCI_EXP_DEVCTL2,
3887 PCI_EXP_DEVCTL2_ATOMIC_REQ);
3888 return 0;
3889 }
3890 EXPORT_SYMBOL(pci_enable_atomic_ops_to_root);
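
/*
 * Example (editorial sketch): an endpoint driver asking for 64-bit
 * AtomicOp completion at the Root Port before issuing AtomicOp
 * requests. On failure the driver would fall back to a non-atomic
 * path; the helper name is illustrative.
 */
static int example_enable_atomics(struct pci_dev *pdev)
{
	int rc = pci_enable_atomic_ops_to_root(pdev,
					       PCI_EXP_DEVCAP2_ATOMIC_COMP64);

	if (rc)
		pci_info(pdev, "PCIe AtomicOps not available (%d)\n", rc);

	return rc;
}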
3891
3892 /**
3893 * pci_release_region - Release a PCI bar
3894 * @pdev: PCI device whose resources were previously reserved by
3895 * pci_request_region()
3896 * @bar: BAR to release
3897 *
3898 * Releases the PCI I/O and memory resources previously reserved by a
3899 * successful call to pci_request_region(). Call this function only
3900 * after all use of the PCI regions has ceased.
3901 */
3902 void pci_release_region(struct pci_dev *pdev, int bar)
3903 {
3904 if (!pci_bar_index_is_valid(bar))
3905 return;
3906
3907 if (pci_resource_len(pdev, bar) == 0)
3908 return;
3909 if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
3910 release_region(pci_resource_start(pdev, bar),
3911 pci_resource_len(pdev, bar));
3912 else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
3913 release_mem_region(pci_resource_start(pdev, bar),
3914 pci_resource_len(pdev, bar));
3915 }
3916 EXPORT_SYMBOL(pci_release_region);
3917
3918 /**
3919 * __pci_request_region - Reserve PCI I/O and memory resource
3920 * @pdev: PCI device whose resources are to be reserved
3921 * @bar: BAR to be reserved
3922 * @name: name of the driver requesting the resource
3923 * @exclusive: whether the region access is exclusive or not
3924 *
3925 * Returns: 0 on success, negative error code on failure.
3926 *
3927 * Mark the PCI region associated with PCI device @pdev BAR @bar as being
3928 * reserved by owner @name. Do not access any address inside the PCI regions
3929 * unless this call returns successfully.
3930 *
3931 * If @exclusive is set, then the region is marked so that userspace
3932 * is explicitly not allowed to map the resource via /dev/mem or
3933 * sysfs MMIO access.
3934 *
3935 * Returns 0 on success, or %EBUSY on error. A warning
3936 * message is also printed on failure.
3937 */
3938 static int __pci_request_region(struct pci_dev *pdev, int bar,
3939 const char *name, int exclusive)
3940 {
3941 if (!pci_bar_index_is_valid(bar))
3942 return -EINVAL;
3943
3944 if (pci_resource_len(pdev, bar) == 0)
3945 return 0;
3946
3947 if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
3948 if (!request_region(pci_resource_start(pdev, bar),
3949 pci_resource_len(pdev, bar), name))
3950 goto err_out;
3951 } else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
3952 if (!__request_mem_region(pci_resource_start(pdev, bar),
3953 pci_resource_len(pdev, bar), name,
3954 exclusive))
3955 goto err_out;
3956 }
3957
3958 return 0;
3959
3960 err_out:
3961 pci_warn(pdev, "BAR %d: can't reserve %pR\n", bar,
3962 &pdev->resource[bar]);
3963 return -EBUSY;
3964 }
3965
3966 /**
3967 * pci_request_region - Reserve PCI I/O and memory resource
3968 * @pdev: PCI device whose resources are to be reserved
3969 * @bar: BAR to be reserved
3970 * @name: name of the driver requesting the resource
3971 *
3972 * Returns: 0 on success, negative error code on failure.
3973 *
3974 * Mark the PCI region associated with PCI device @pdev BAR @bar as being
3975 * reserved by owner @name. Do not access any address inside the PCI regions
3976 * unless this call returns successfully.
3977 *
3978 * Returns 0 on success, or %EBUSY on error. A warning
3979 * message is also printed on failure.
3980 */
3981 int pci_request_region(struct pci_dev *pdev, int bar, const char *name)
3982 {
3983 return __pci_request_region(pdev, bar, name, 0);
3984 }
3985 EXPORT_SYMBOL(pci_request_region);
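
/*
 * Example (editorial sketch): reserving and mapping a single BAR from a
 * driver probe path. The "my_drv" owner string is illustrative, and
 * pci_iomap() is assumed for the mapping step.
 */
static void __iomem *example_map_bar(struct pci_dev *pdev, int bar)
{
	void __iomem *regs;

	if (pci_request_region(pdev, bar, "my_drv"))
		return NULL;

	regs = pci_iomap(pdev, bar, 0);
	if (!regs)
		pci_release_region(pdev, bar);

	return regs;
}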
3986
3987 /**
3988 * pci_release_selected_regions - Release selected PCI I/O and memory resources
3989 * @pdev: PCI device whose resources were previously reserved
3990 * @bars: Bitmask of BARs to be released
3991 *
3992 * Release selected PCI I/O and memory resources previously reserved.
3993 * Call this function only after all use of the PCI regions has ceased.
3994 */
3995 void pci_release_selected_regions(struct pci_dev *pdev, int bars)
3996 {
3997 int i;
3998
3999 for (i = 0; i < PCI_STD_NUM_BARS; i++)
4000 if (bars & (1 << i))
4001 pci_release_region(pdev, i);
4002 }
4003 EXPORT_SYMBOL(pci_release_selected_regions);
4004
4005 static int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
4006 const char *name, int excl)
4007 {
4008 int i;
4009
4010 for (i = 0; i < PCI_STD_NUM_BARS; i++)
4011 if (bars & (1 << i))
4012 if (__pci_request_region(pdev, i, name, excl))
4013 goto err_out;
4014 return 0;
4015
4016 err_out:
4017 while (--i >= 0)
4018 if (bars & (1 << i))
4019 pci_release_region(pdev, i);
4020
4021 return -EBUSY;
4022 }
4023
4024
4025 /**
4026 * pci_request_selected_regions - Reserve selected PCI I/O and memory resources
4027 * @pdev: PCI device whose resources are to be reserved
4028 * @bars: Bitmask of BARs to be requested
4029 * @name: Name of the driver requesting the resources
4030 *
4031 * Returns: 0 on success, negative error code on failure.
4032 */
4033 int pci_request_selected_regions(struct pci_dev *pdev, int bars,
4034 const char *name)
4035 {
4036 return __pci_request_selected_regions(pdev, bars, name, 0);
4037 }
4038 EXPORT_SYMBOL(pci_request_selected_regions);
4039
4040 /**
4041 * pci_request_selected_regions_exclusive - Request regions exclusively
4042 * @pdev: PCI device to request regions from
4043 * @bars: bit mask of BARs to request
4044 * @name: name of the driver requesting the resources
4045 *
4046 * Returns: 0 on success, negative error code on failure.
4047 */
4048 int pci_request_selected_regions_exclusive(struct pci_dev *pdev, int bars,
4049 const char *name)
4050 {
4051 return __pci_request_selected_regions(pdev, bars, name,
4052 IORESOURCE_EXCLUSIVE);
4053 }
4054 EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
4055
4056 /**
4057 * pci_release_regions - Release reserved PCI I/O and memory resources
4058 * @pdev: PCI device whose resources were previously reserved by
4059 * pci_request_regions()
4060 *
4061 * Releases all PCI I/O and memory resources previously reserved by a
4062 * successful call to pci_request_regions(). Call this function only
4063 * after all use of the PCI regions has ceased.
4064 */
4065 void pci_release_regions(struct pci_dev *pdev)
4066 {
4067 pci_release_selected_regions(pdev, (1 << PCI_STD_NUM_BARS) - 1);
4068 }
4069 EXPORT_SYMBOL(pci_release_regions);
4070
4071 /**
4072 * pci_request_regions - Reserve PCI I/O and memory resources
4073 * @pdev: PCI device whose resources are to be reserved
4074 * @name: name of the driver requesting the resources
4075 *
4076 * Mark all PCI regions associated with PCI device @pdev as being reserved by
4077 * owner @name. Do not access any address inside the PCI regions unless this
4078 * call returns successfully.
4079 *
4080 * Returns 0 on success, or %EBUSY on error. A warning
4081 * message is also printed on failure.
4082 */
4083 int pci_request_regions(struct pci_dev *pdev, const char *name)
4084 {
4085 return pci_request_selected_regions(pdev,
4086 ((1 << PCI_STD_NUM_BARS) - 1), name);
4087 }
4088 EXPORT_SYMBOL(pci_request_regions);
4089
4090 /**
4091 * pci_request_regions_exclusive - Reserve PCI I/O and memory resources
4092 * @pdev: PCI device whose resources are to be reserved
4093 * @name: name of the driver requesting the resources
4094 *
4095 * Returns: 0 on success, negative error code on failure.
4096 *
4097 * Mark all PCI regions associated with PCI device @pdev as being reserved
4098 * by owner @name. Do not access any address inside the PCI regions
4099 * unless this call returns successfully.
4100 *
4101 * pci_request_regions_exclusive() will mark the region so that /dev/mem
4102 * and the sysfs MMIO access will not be allowed.
4103 *
4104 * Returns 0 on success, or %EBUSY on error. A warning message is also
4105 * printed on failure.
4106 */
4107 int pci_request_regions_exclusive(struct pci_dev *pdev, const char *name)
4108 {
4109 return pci_request_selected_regions_exclusive(pdev,
4110 ((1 << PCI_STD_NUM_BARS) - 1), name);
4111 }
4112 EXPORT_SYMBOL(pci_request_regions_exclusive);
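
/*
 * Example (editorial sketch): the usual probe/remove pairing for the
 * region helpers above. Function and owner names are illustrative.
 */
static int example_probe(struct pci_dev *pdev)
{
	int rc = pci_enable_device(pdev);

	if (rc)
		return rc;

	rc = pci_request_regions(pdev, "my_drv");
	if (rc)
		pci_disable_device(pdev);

	return rc;
}

static void example_remove(struct pci_dev *pdev)
{
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}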
4113
4114 /*
4115 * Record the PCI IO range (expressed as CPU physical address + size).
4116 * Return a negative value if an error has occurred, zero otherwise.
4117 */
4118 int pci_register_io_range(const struct fwnode_handle *fwnode, phys_addr_t addr,
4119 resource_size_t size)
4120 {
4121 int ret = 0;
4122 #ifdef PCI_IOBASE
4123 struct logic_pio_hwaddr *range;
4124
4125 if (!size || addr + size < addr)
4126 return -EINVAL;
4127
4128 range = kzalloc(sizeof(*range), GFP_ATOMIC);
4129 if (!range)
4130 return -ENOMEM;
4131
4132 range->fwnode = fwnode;
4133 range->size = size;
4134 range->hw_start = addr;
4135 range->flags = LOGIC_PIO_CPU_MMIO;
4136
4137 ret = logic_pio_register_range(range);
4138 if (ret)
4139 kfree(range);
4140
4141 /* Ignore duplicates due to deferred probing */
4142 if (ret == -EEXIST)
4143 ret = 0;
4144 #endif
4145
4146 return ret;
4147 }
4148
4149 phys_addr_t pci_pio_to_address(unsigned long pio)
4150 {
4151 #ifdef PCI_IOBASE
4152 if (pio < MMIO_UPPER_LIMIT)
4153 return logic_pio_to_hwaddr(pio);
4154 #endif
4155
4156 return (phys_addr_t) OF_BAD_ADDR;
4157 }
4158 EXPORT_SYMBOL_GPL(pci_pio_to_address);
4159
4160 unsigned long __weak pci_address_to_pio(phys_addr_t address)
4161 {
4162 #ifdef PCI_IOBASE
4163 return logic_pio_trans_cpuaddr(address);
4164 #else
4165 if (address > IO_SPACE_LIMIT)
4166 return (unsigned long)-1;
4167
4168 return (unsigned long) address;
4169 #endif
4170 }
4171
4172 /**
4173 * pci_remap_iospace - Remap the memory mapped I/O space
4174 * @res: Resource describing the I/O space
4175 * @phys_addr: physical address of range to be mapped
4176 *
4177 * Remap the memory mapped I/O space described by the @res and the CPU
4178 * physical address @phys_addr into virtual address space. Only
4179 * architectures that have memory mapped IO functions defined (and the
4180 * PCI_IOBASE value defined) should call this function.
4181 */
4182 #ifndef pci_remap_iospace
4183 int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
4184 {
4185 #if defined(PCI_IOBASE)
4186 unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
4187
4188 if (!(res->flags & IORESOURCE_IO))
4189 return -EINVAL;
4190
4191 if (res->end > IO_SPACE_LIMIT)
4192 return -EINVAL;
4193
4194 return vmap_page_range(vaddr, vaddr + resource_size(res), phys_addr,
4195 pgprot_device(PAGE_KERNEL));
4196 #else
4197 /*
4198 * This architecture does not have memory mapped I/O space,
4199 * so this function should never be called
4200 */
4201 WARN_ONCE(1, "This architecture does not support memory mapped I/O\n");
4202 return -ENODEV;
4203 #endif
4204 }
4205 EXPORT_SYMBOL(pci_remap_iospace);
4206 #endif
4207
4208 /**
4209 * pci_unmap_iospace - Unmap the memory mapped I/O space
4210 * @res: resource to be unmapped
4211 *
4212 * Unmap the CPU virtual address @res from virtual address space. Only
4213 * architectures that have memory mapped IO functions defined (and the
4214 * PCI_IOBASE value defined) should call this function.
4215 */
4216 void pci_unmap_iospace(struct resource *res)
4217 {
4218 #if defined(PCI_IOBASE)
4219 unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
4220
4221 vunmap_range(vaddr, vaddr + resource_size(res));
4222 #endif
4223 }
4224 EXPORT_SYMBOL(pci_unmap_iospace);
4225
4226 static void __pci_set_master(struct pci_dev *dev, bool enable)
4227 {
4228 u16 old_cmd, cmd;
4229
4230 pci_read_config_word(dev, PCI_COMMAND, &old_cmd);
4231 if (enable)
4232 cmd = old_cmd | PCI_COMMAND_MASTER;
4233 else
4234 cmd = old_cmd & ~PCI_COMMAND_MASTER;
4235 if (cmd != old_cmd) {
4236 pci_dbg(dev, "%s bus mastering\n",
4237 enable ? "enabling" : "disabling");
4238 pci_write_config_word(dev, PCI_COMMAND, cmd);
4239 }
4240 dev->is_busmaster = enable;
4241 }
4242
4243 /**
4244 * pcibios_setup - process "pci=" kernel boot arguments
4245 * @str: string used to pass in "pci=" kernel boot arguments
4246 *
4247 * Process kernel boot arguments. This is the default implementation.
4248 * Architecture specific implementations can override this as necessary.
4249 */
4250 char * __weak __init pcibios_setup(char *str)
4251 {
4252 return str;
4253 }
4254
4255 /**
4256 * pcibios_set_master - enable PCI bus-mastering for device dev
4257 * @dev: the PCI device to enable
4258 *
4259 * Enables PCI bus-mastering for the device. This is the default
4260 * implementation. Architecture specific implementations can override
4261 * this if necessary.
4262 */
4263 void __weak pcibios_set_master(struct pci_dev *dev)
4264 {
4265 u8 lat;
4266
4267 /* The latency timer doesn't apply to PCIe (either Type 0 or Type 1) */
4268 if (pci_is_pcie(dev))
4269 return;
4270
4271 pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
4272 if (lat < 16)
4273 lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency;
4274 else if (lat > pcibios_max_latency)
4275 lat = pcibios_max_latency;
4276 else
4277 return;
4278
4279 pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
4280 }
4281
4282 /**
4283 * pci_set_master - enables bus-mastering for device dev
4284 * @dev: the PCI device to enable
4285 *
4286 * Enables bus-mastering on the device and calls pcibios_set_master()
4287 * to do the needed arch specific settings.
4288 */
4289 void pci_set_master(struct pci_dev *dev)
4290 {
4291 __pci_set_master(dev, true);
4292 pcibios_set_master(dev);
4293 }
4294 EXPORT_SYMBOL(pci_set_master);
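
/*
 * Example (editorial sketch): a driver enabling DMA. Bus mastering must
 * be on before the device can issue DMA reads/writes or MSI/MSI-X
 * messages; dma_set_mask_and_coherent() from <linux/dma-mapping.h> is
 * assumed for the mask setup, and the helper name is illustrative.
 */
static int example_enable_dma(struct pci_dev *pdev)
{
	int rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));

	if (rc)
		return rc;

	pci_set_master(pdev);
	return 0;
}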
4295
4296 /**
4297 * pci_clear_master - disables bus-mastering for device dev
4298 * @dev: the PCI device to disable
4299 */
4300 void pci_clear_master(struct pci_dev *dev)
4301 {
4302 __pci_set_master(dev, false);
4303 }
4304 EXPORT_SYMBOL(pci_clear_master);
4305
4306 /**
4307 * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed
4308 * @dev: the PCI device for which MWI is to be enabled
4309 *
4310 * Helper function for pci_set_mwi.
4311 * Originally copied from drivers/net/acenic.c.
4312 * Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>.
4313 *
4314 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
4315 */
4316 int pci_set_cacheline_size(struct pci_dev *dev)
4317 {
4318 u8 cacheline_size;
4319
4320 if (!pci_cache_line_size)
4321 return -EINVAL;
4322
4323 /* Validate current setting: the PCI_CACHE_LINE_SIZE must be
4324 equal to or a multiple of the right value. */
4325 pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
4326 if (cacheline_size >= pci_cache_line_size &&
4327 (cacheline_size % pci_cache_line_size) == 0)
4328 return 0;
4329
4330 /* Write the correct value. */
4331 pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
4332 /* Read it back. */
4333 pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
4334 if (cacheline_size == pci_cache_line_size)
4335 return 0;
4336
4337 pci_dbg(dev, "cache line size of %d is not supported\n",
4338 pci_cache_line_size << 2);
4339
4340 return -EINVAL;
4341 }
4342 EXPORT_SYMBOL_GPL(pci_set_cacheline_size);
4343
4344 /**
4345 * pci_set_mwi - enables memory-write-invalidate PCI transaction
4346 * @dev: the PCI device for which MWI is enabled
4347 *
4348 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
4349 *
4350 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
4351 */
4352 int pci_set_mwi(struct pci_dev *dev)
4353 {
4354 #ifdef PCI_DISABLE_MWI
4355 return 0;
4356 #else
4357 int rc;
4358 u16 cmd;
4359
4360 rc = pci_set_cacheline_size(dev);
4361 if (rc)
4362 return rc;
4363
4364 pci_read_config_word(dev, PCI_COMMAND, &cmd);
4365 if (!(cmd & PCI_COMMAND_INVALIDATE)) {
4366 pci_dbg(dev, "enabling Mem-Wr-Inval\n");
4367 cmd |= PCI_COMMAND_INVALIDATE;
4368 pci_write_config_word(dev, PCI_COMMAND, cmd);
4369 }
4370 return 0;
4371 #endif
4372 }
4373 EXPORT_SYMBOL(pci_set_mwi);
4374
4375 /**
4376 * pci_try_set_mwi - enables memory-write-invalidate PCI transaction
4377 * @dev: the PCI device for which MWI is enabled
4378 *
4379 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
4380 * Callers are not required to check the return value.
4381 *
4382 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
4383 */
4384 int pci_try_set_mwi(struct pci_dev *dev)
4385 {
4386 #ifdef PCI_DISABLE_MWI
4387 return 0;
4388 #else
4389 return pci_set_mwi(dev);
4390 #endif
4391 }
4392 EXPORT_SYMBOL(pci_try_set_mwi);
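
/*
 * Example (editorial sketch): since MWI is only a conventional-PCI
 * performance hint, drivers typically call the "try" variant and ignore
 * the result. The function name is illustrative.
 */
static void example_enable_mwi(struct pci_dev *pdev)
{
	/* Harmless if the arch or device does not support MWI */
	pci_try_set_mwi(pdev);
}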
4393
4394 /**
4395 * pci_clear_mwi - disables Memory-Write-Invalidate for device dev
4396 * @dev: the PCI device to disable
4397 *
4398 * Disables PCI Memory-Write-Invalidate transaction on the device
4399 */
4400 void pci_clear_mwi(struct pci_dev *dev)
4401 {
4402 #ifndef PCI_DISABLE_MWI
4403 u16 cmd;
4404
4405 pci_read_config_word(dev, PCI_COMMAND, &cmd);
4406 if (cmd & PCI_COMMAND_INVALIDATE) {
4407 cmd &= ~PCI_COMMAND_INVALIDATE;
4408 pci_write_config_word(dev, PCI_COMMAND, cmd);
4409 }
4410 #endif
4411 }
4412 EXPORT_SYMBOL(pci_clear_mwi);
4413
4414 /**
4415 * pci_disable_parity - disable parity checking for device
4416 * @dev: the PCI device to operate on
4417 *
4418 * Disable parity checking for device @dev
4419 */
4420 void pci_disable_parity(struct pci_dev *dev)
4421 {
4422 u16 cmd;
4423
4424 pci_read_config_word(dev, PCI_COMMAND, &cmd);
4425 if (cmd & PCI_COMMAND_PARITY) {
4426 cmd &= ~PCI_COMMAND_PARITY;
4427 pci_write_config_word(dev, PCI_COMMAND, cmd);
4428 }
4429 }
4430
4431 /**
4432 * pci_intx - enables/disables PCI INTx for device dev
4433 * @pdev: the PCI device to operate on
4434 * @enable: boolean: whether to enable or disable PCI INTx
4435 *
4436 * Enables/disables PCI INTx for device @pdev
4437 */
4438 void pci_intx(struct pci_dev *pdev, int enable)
4439 {
4440 u16 pci_command, new;
4441
4442 pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
4443
4444 if (enable)
4445 new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
4446 else
4447 new = pci_command | PCI_COMMAND_INTX_DISABLE;
4448
4449 if (new == pci_command)
4450 return;
4451
4452 pci_write_config_word(pdev, PCI_COMMAND, new);
4453 }
4454 EXPORT_SYMBOL_GPL(pci_intx);
4455
4456 /**
4457 * pci_wait_for_pending_transaction - wait for pending transaction
4458 * @dev: the PCI device to operate on
4459 *
4460 * Return 0 if transaction is pending, 1 otherwise.
4461 */
4462 int pci_wait_for_pending_transaction(struct pci_dev *dev)
4463 {
4464 if (!pci_is_pcie(dev))
4465 return 1;
4466
4467 return pci_wait_for_pending(dev, pci_pcie_cap(dev) + PCI_EXP_DEVSTA,
4468 PCI_EXP_DEVSTA_TRPND);
4469 }
4470 EXPORT_SYMBOL(pci_wait_for_pending_transaction);
4471
4472 /**
4473 * pcie_flr - initiate a PCIe function level reset
4474 * @dev: device to reset
4475 *
4476 * Initiate a function level reset unconditionally on @dev without
4477 * checking any flags or DEVCAP
4478 */
4479 int pcie_flr(struct pci_dev *dev)
4480 {
4481 if (!pci_wait_for_pending_transaction(dev))
4482 pci_err(dev, "timed out waiting for pending transaction; performing function level reset anyway\n");
4483
4484 pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
4485
4486 if (dev->imm_ready)
4487 return 0;
4488
4489 /*
4490 * Per PCIe r4.0, sec 6.6.2, a device must complete an FLR within
4491 * 100ms, but may silently discard requests while the FLR is in
4492 * progress. Wait 100ms before trying to access the device.
4493 */
4494 msleep(100);
4495
4496 return pci_dev_wait(dev, "FLR", PCIE_RESET_READY_POLL_MS);
4497 }
4498 EXPORT_SYMBOL_GPL(pcie_flr);
4499
4500 /**
4501 * pcie_reset_flr - initiate a PCIe function level reset
4502 * @dev: device to reset
4503 * @probe: if true, return 0 if device can be reset this way
4504 *
4505 * Initiate a function level reset on @dev.
4506 */
4507 int pcie_reset_flr(struct pci_dev *dev, bool probe)
4508 {
4509 if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET)
4510 return -ENOTTY;
4511
4512 if (!(dev->devcap & PCI_EXP_DEVCAP_FLR))
4513 return -ENOTTY;
4514
4515 if (probe)
4516 return 0;
4517
4518 return pcie_flr(dev);
4519 }
4520 EXPORT_SYMBOL_GPL(pcie_reset_flr);
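
/*
 * Example (editorial sketch): the probe-then-reset pattern the reset
 * methods use. A call with PCI_RESET_PROBE only reports whether FLR is
 * usable on @pdev; a second call performs the actual reset. The helper
 * name is illustrative.
 */
static int example_try_flr(struct pci_dev *pdev)
{
	if (pcie_reset_flr(pdev, PCI_RESET_PROBE))
		return -ENOTTY;		/* FLR not supported */

	return pcie_reset_flr(pdev, PCI_RESET_DO_RESET);
}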
4521
4522 static int pci_af_flr(struct pci_dev *dev, bool probe)
4523 {
4524 int pos;
4525 u8 cap;
4526
4527 pos = pci_find_capability(dev, PCI_CAP_ID_AF);
4528 if (!pos)
4529 return -ENOTTY;
4530
4531 if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET)
4532 return -ENOTTY;
4533
4534 pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap);
4535 if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
4536 return -ENOTTY;
4537
4538 if (probe)
4539 return 0;
4540
4541 /*
4542 * Wait for Transaction Pending bit to clear. A word-aligned test
4543 * is used, so we use the control offset rather than status and shift
4544 * the test bit to match.
4545 */
4546 if (!pci_wait_for_pending(dev, pos + PCI_AF_CTRL,
4547 PCI_AF_STATUS_TP << 8))
4548 pci_err(dev, "timed out waiting for pending transaction; performing AF function level reset anyway\n");
4549
4550 pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
4551
4552 if (dev->imm_ready)
4553 return 0;
4554
4555 /*
4556 * Per Advanced Capabilities for Conventional PCI ECN, 13 April 2006,
4557 * updated 27 July 2006; a device must complete an FLR within
4558 * 100ms, but may silently discard requests while the FLR is in
4559 * progress. Wait 100ms before trying to access the device.
4560 */
4561 msleep(100);
4562
4563 return pci_dev_wait(dev, "AF_FLR", PCIE_RESET_READY_POLL_MS);
4564 }
4565
4566 /**
4567 * pci_pm_reset - Put device into PCI_D3 and back into PCI_D0.
4568 * @dev: Device to reset.
4569 * @probe: if true, return 0 if the device can be reset this way.
4570 *
4571 * If @dev supports native PCI PM and its PCI_PM_CTRL_NO_SOFT_RESET flag is
4572 * unset, it will be reinitialized internally when going from PCI_D3hot to
4573 * PCI_D0. If that's the case and the device is not in a low-power state
4574 * already, force it into PCI_D3hot and back to PCI_D0, causing it to be reset.
4575 *
4576 * NOTE: This causes the caller to sleep for twice the device power transition
4577 * cooldown period, which for the D0->D3hot and D3hot->D0 transitions is 10 ms
4578 * by default (i.e. unless the @dev's d3hot_delay field has a different value).
4579 * Moreover, only devices in D0 can be reset by this function.
4580 */
4581 static int pci_pm_reset(struct pci_dev *dev, bool probe)
4582 {
4583 u16 csr;
4584
4585 if (!dev->pm_cap || dev->dev_flags & PCI_DEV_FLAGS_NO_PM_RESET)
4586 return -ENOTTY;
4587
4588 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr);
4589 if (csr & PCI_PM_CTRL_NO_SOFT_RESET)
4590 return -ENOTTY;
4591
4592 if (probe)
4593 return 0;
4594
4595 if (dev->current_state != PCI_D0)
4596 return -EINVAL;
4597
4598 csr &= ~PCI_PM_CTRL_STATE_MASK;
4599 csr |= PCI_D3hot;
4600 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
4601 pci_dev_d3_sleep(dev);
4602
4603 csr &= ~PCI_PM_CTRL_STATE_MASK;
4604 csr |= PCI_D0;
4605 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
4606 pci_dev_d3_sleep(dev);
4607
4608 return pci_dev_wait(dev, "PM D3hot->D0", PCIE_RESET_READY_POLL_MS);
4609 }
4610
4611 /**
4612 * pcie_wait_for_link_status - Wait for link status change
4613 * @pdev: Device whose link to wait for.
4614 * @use_lt: Use the LT bit if TRUE, or the DLLLA bit if FALSE.
4615 * @active: Waiting for active or inactive?
4616 *
4617 * Return 0 if successful, or -ETIMEDOUT if status has not changed within
4618 * PCIE_LINK_RETRAIN_TIMEOUT_MS milliseconds.
4619 */
4620 static int pcie_wait_for_link_status(struct pci_dev *pdev,
4621 bool use_lt, bool active)
4622 {
4623 u16 lnksta_mask, lnksta_match;
4624 unsigned long end_jiffies;
4625 u16 lnksta;
4626
4627 lnksta_mask = use_lt ? PCI_EXP_LNKSTA_LT : PCI_EXP_LNKSTA_DLLLA;
4628 lnksta_match = active ? lnksta_mask : 0;
4629
4630 end_jiffies = jiffies + msecs_to_jiffies(PCIE_LINK_RETRAIN_TIMEOUT_MS);
4631 do {
4632 pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnksta);
4633 if ((lnksta & lnksta_mask) == lnksta_match)
4634 return 0;
4635 msleep(1);
4636 } while (time_before(jiffies, end_jiffies));
4637
4638 return -ETIMEDOUT;
4639 }
4640
4641 /**
4642 * pcie_retrain_link - Request a link retrain and wait for it to complete
4643 * @pdev: Device whose link to retrain.
4644 * @use_lt: Use the LT bit if TRUE, or the DLLLA bit if FALSE, for status.
4645 *
4646 * Trigger retraining of the PCIe Link and wait for the completion of the
4647 * retraining. As link retraining is known to assert LBMS and may change
4648 * the Link Speed, LBMS is cleared after the retraining and the Link Speed
4649 * of the subordinate bus is updated.
4650 *
4651 * Retrain completion status is retrieved from the Link Status Register
4652 * according to @use_lt. It is not verified whether the use of the DLLLA
4653 * bit is valid.
4654 *
4655 * Return 0 if successful, or -ETIMEDOUT if training has not completed
4656 * within PCIE_LINK_RETRAIN_TIMEOUT_MS milliseconds.
4657 */
4658 int pcie_retrain_link(struct pci_dev *pdev, bool use_lt)
4659 {
4660 int rc;
4661
4662 /*
4663 * Ensure the updated LNKCTL parameters are used during link
4664 * training by checking that there is no ongoing link training that
4665 * may have started before link parameters were changed, so as to
4666 * avoid LTSSM race as recommended in Implementation Note at the end
4667 * of PCIe r6.1 sec 7.5.3.7.
4668 */
4669 rc = pcie_wait_for_link_status(pdev, true, false);
4670 if (rc)
4671 return rc;
4672
4673 pcie_capability_set_word(pdev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_RL);
4674 if (pdev->clear_retrain_link) {
4675 /*
4676 * Due to an erratum in some devices the Retrain Link bit
4677 * needs to be cleared again manually to allow the link
4678 * training to succeed.
4679 */
4680 pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_RL);
4681 }
4682
4683 rc = pcie_wait_for_link_status(pdev, use_lt, !use_lt);
4684
4685 /*
4686 * Clear LBMS after a manual retrain so that the bit can be used
4687 * to track link speed or width changes made by hardware itself
4688 * in an attempt to correct unreliable link operation.
4689 */
4690 pcie_reset_lbms(pdev);
4691
4692 /*
4693 * Ensure the Link Speed updates after retraining in case the Link
4694 * Speed was changed because of the retraining. While the bwctrl's
4695 * IRQ handler normally picks up the new Link Speed, clearing LBMS
4696 * races with the IRQ handler reading the Link Status register and
4697 * can result in the handler returning early without updating the
4698 * Link Speed.
4699 */
4700 if (pdev->subordinate)
4701 pcie_update_link_speed(pdev->subordinate);
4702
4703 return rc;
4704 }
4705
4706 /**
4707 * pcie_wait_for_link_delay - Wait until link is active or inactive
4708 * @pdev: Bridge device
4709 * @active: waiting for active or inactive?
4710 * @delay: Delay to wait after link has become active (in ms)
4711 *
4712 * Use this to wait until the link becomes active or inactive.
4713 */
4714 static bool pcie_wait_for_link_delay(struct pci_dev *pdev, bool active,
4715 int delay)
4716 {
4717 int rc;
4718
4719 /*
4720 * Some controllers might not implement link active reporting. In this
4721 * case, we wait for 1000 ms + any delay requested by the caller.
4722 */
4723 if (!pdev->link_active_reporting) {
4724 msleep(PCIE_LINK_RETRAIN_TIMEOUT_MS + delay);
4725 return true;
4726 }
4727
4728 /*
4729 * PCIe r4.0 sec 6.6.1, a component must enter LTSSM Detect within 20ms,
4730 * after which we should expect the link to be active if the reset was
4731 * successful. If so, software must wait a minimum of 100 ms before
4732 * sending configuration requests to devices downstream of this port.
4733 *
4734 * If the link fails to activate, either the device was physically
4735 * removed or the link has permanently failed.
4736 */
4737 if (active)
4738 msleep(20);
4739 rc = pcie_wait_for_link_status(pdev, false, active);
4740 if (active) {
4741 if (rc)
4742 rc = pcie_failed_link_retrain(pdev);
4743 if (rc)
4744 return false;
4745
4746 msleep(delay);
4747 return true;
4748 }
4749
4750 if (rc)
4751 return false;
4752
4753 return true;
4754 }
4755
4756 /**
4757 * pcie_wait_for_link - Wait until link is active or inactive
4758 * @pdev: Bridge device
4759 * @active: waiting for active or inactive?
4760 *
4761 * Use this to wait until the link becomes active or inactive.
4762 */
4763 bool pcie_wait_for_link(struct pci_dev *pdev, bool active)
4764 {
4765 return pcie_wait_for_link_delay(pdev, active, 100);
4766 }
4767
4768 /*
4769 * Find maximum D3cold delay required by all the devices on the bus. The
4770 * spec says 100 ms, but firmware can lower it and we allow drivers to
4771 * increase it as well.
4772 *
4773 * Called with @pci_bus_sem locked for reading.
4774 */
4775 static int pci_bus_max_d3cold_delay(const struct pci_bus *bus)
4776 {
4777 const struct pci_dev *pdev;
4778 int min_delay = 100;
4779 int max_delay = 0;
4780
4781 list_for_each_entry(pdev, &bus->devices, bus_list) {
4782 if (pdev->d3cold_delay < min_delay)
4783 min_delay = pdev->d3cold_delay;
4784 if (pdev->d3cold_delay > max_delay)
4785 max_delay = pdev->d3cold_delay;
4786 }
4787
4788 return max(min_delay, max_delay);
4789 }
4790
4791 /**
4792 * pci_bridge_wait_for_secondary_bus - Wait for secondary bus to be accessible
4793 * @dev: PCI bridge
4794 * @reset_type: reset type in human-readable form
4795 *
4796 * Handle necessary delays before access to the devices on the secondary
4797 * side of the bridge are permitted after D3cold to D0 transition
4798 * or Conventional Reset.
4799 *
4800 * For PCIe this means the delays in PCIe 5.0 section 6.6.1. For
4801 * conventional PCI it means Tpvrh + Trhfa specified in PCI 3.0 section
4802 * 4.3.2.
4803 *
4804 * Return 0 on success or -ENOTTY if the first device on the secondary bus
4805 * failed to become accessible.
4806 */
4807 int pci_bridge_wait_for_secondary_bus(struct pci_dev *dev, char *reset_type)
4808 {
4809 struct pci_dev *child __free(pci_dev_put) = NULL;
4810 int delay;
4811
4812 if (pci_dev_is_disconnected(dev))
4813 return 0;
4814
4815 if (!pci_is_bridge(dev))
4816 return 0;
4817
4818 down_read(&pci_bus_sem);
4819
4820 /*
4821 * We only deal with devices that are present currently on the bus.
4822 * For any hot-added devices the access delay is handled in pciehp
4823 * board_added(). In case of ACPI hotplug the firmware is expected
4824 * to configure the devices before OS is notified.
4825 */
4826 if (!dev->subordinate || list_empty(&dev->subordinate->devices)) {
4827 up_read(&pci_bus_sem);
4828 return 0;
4829 }
4830
4831 /* Take d3cold_delay requirements into account */
4832 delay = pci_bus_max_d3cold_delay(dev->subordinate);
4833 if (!delay) {
4834 up_read(&pci_bus_sem);
4835 return 0;
4836 }
4837
4838 child = pci_dev_get(list_first_entry(&dev->subordinate->devices,
4839 struct pci_dev, bus_list));
4840 up_read(&pci_bus_sem);
4841
4842 /*
4843 * For conventional PCI and PCI-X we need to wait Tpvrh + Trhfa before
4844 * accessing the device after reset (that is 1000 ms + 100 ms).
4845 */
4846 if (!pci_is_pcie(dev)) {
4847 pci_dbg(dev, "waiting %d ms for secondary bus\n", 1000 + delay);
4848 msleep(1000 + delay);
4849 return 0;
4850 }
4851
4852 /*
4853 * PCIe downstream and root ports that do not support speeds greater
4854 * than 5 GT/s need to wait a minimum of 100 ms. For higher speeds
4855 * (Gen 3 and above) we first need to wait for the Data Link Layer
4856 * to become active.
4857 *
4858 * However, 100 ms is the minimum and the PCIe spec says the
4859 * software must allow at least 1s before it can determine that the
4860 * device that did not respond is a broken device. A device can also
4861 * take longer than that to respond if it indicates so through Request
4862 * Retry Status completions.
4863 *
4864 * Therefore we wait for 100 ms and check for the device presence
4865 * until the timeout expires.
4866 */
4867 if (!pcie_downstream_port(dev))
4868 return 0;
4869
4870 if (pcie_get_speed_cap(dev) <= PCIE_SPEED_5_0GT) {
4871 u16 status;
4872
4873 pci_dbg(dev, "waiting %d ms for downstream link\n", delay);
4874 msleep(delay);
4875
4876 if (!pci_dev_wait(child, reset_type, PCI_RESET_WAIT - delay))
4877 return 0;
4878
4879 /*
4880 * If the port supports active link reporting we now check
4881 * whether the link is active and if not bail out early with
4882 * the assumption that the device is not present anymore.
4883 */
4884 if (!dev->link_active_reporting)
4885 return -ENOTTY;
4886
4887 pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &status);
4888 if (!(status & PCI_EXP_LNKSTA_DLLLA))
4889 return -ENOTTY;
4890
4891 return pci_dev_wait(child, reset_type,
4892 PCIE_RESET_READY_POLL_MS - PCI_RESET_WAIT);
4893 }
4894
4895 pci_dbg(dev, "waiting %d ms for downstream link, after activation\n",
4896 delay);
4897 if (!pcie_wait_for_link_delay(dev, true, delay)) {
4898 /* Did not train, no need to wait any further */
4899 pci_info(dev, "Data Link Layer Link Active not set in %d msec\n", delay);
4900 return -ENOTTY;
4901 }
4902
4903 return pci_dev_wait(child, reset_type,
4904 PCIE_RESET_READY_POLL_MS - delay);
4905 }
4906
4907 void pci_reset_secondary_bus(struct pci_dev *dev)
4908 {
4909 u16 ctrl;
4910
4911 pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &ctrl);
4912 ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
4913 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
4914
4915 /*
4916 * PCI spec v3.0 7.6.4.2 requires minimum Trst of 1ms. Double
4917 * this to 2ms to ensure that we meet the minimum requirement.
4918 */
4919 msleep(2);
4920
4921 ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
4922 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
4923 }
4924
4925 void __weak pcibios_reset_secondary_bus(struct pci_dev *dev)
4926 {
4927 pci_reset_secondary_bus(dev);
4928 }
4929
4930 /**
4931 * pci_bridge_secondary_bus_reset - Reset the secondary bus on a PCI bridge.
4932 * @dev: Bridge device
4933 *
4934 * Use the bridge control register to assert reset on the secondary bus.
4935 * Devices on the secondary bus are left in power-on state.
4936 */
4937 int pci_bridge_secondary_bus_reset(struct pci_dev *dev)
4938 {
4939 if (!dev->block_cfg_access)
4940 pci_warn_once(dev, "unlocked secondary bus reset via: %pS\n",
4941 __builtin_return_address(0));
4942 pcibios_reset_secondary_bus(dev);
4943
4944 return pci_bridge_wait_for_secondary_bus(dev, "bus reset");
4945 }
4946 EXPORT_SYMBOL_GPL(pci_bridge_secondary_bus_reset);
4947
4948 static int pci_parent_bus_reset(struct pci_dev *dev, bool probe)
4949 {
4950 struct pci_dev *pdev;
4951
4952 if (pci_is_root_bus(dev->bus) || dev->subordinate ||
4953 !dev->bus->self || dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
4954 return -ENOTTY;
4955
4956 list_for_each_entry(pdev, &dev->bus->devices, bus_list)
4957 if (pdev != dev)
4958 return -ENOTTY;
4959
4960 if (probe)
4961 return 0;
4962
4963 return pci_bridge_secondary_bus_reset(dev->bus->self);
4964 }
4965
4966 static int pci_reset_hotplug_slot(struct hotplug_slot *hotplug, bool probe)
4967 {
4968 int rc = -ENOTTY;
4969
4970 if (!hotplug || !try_module_get(hotplug->owner))
4971 return rc;
4972
4973 if (hotplug->ops->reset_slot)
4974 rc = hotplug->ops->reset_slot(hotplug, probe);
4975
4976 module_put(hotplug->owner);
4977
4978 return rc;
4979 }
4980
4981 static int pci_dev_reset_slot_function(struct pci_dev *dev, bool probe)
4982 {
4983 if (dev->multifunction || dev->subordinate || !dev->slot ||
4984 dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
4985 return -ENOTTY;
4986
4987 return pci_reset_hotplug_slot(dev->slot->hotplug, probe);
4988 }
4989
4990 static u16 cxl_port_dvsec(struct pci_dev *dev)
4991 {
4992 return pci_find_dvsec_capability(dev, PCI_VENDOR_ID_CXL,
4993 PCI_DVSEC_CXL_PORT);
4994 }
4995
4996 static bool cxl_sbr_masked(struct pci_dev *dev)
4997 {
4998 u16 dvsec, reg;
4999 int rc;
5000
5001 dvsec = cxl_port_dvsec(dev);
5002 if (!dvsec)
5003 return false;
5004
5005 rc = pci_read_config_word(dev, dvsec + PCI_DVSEC_CXL_PORT_CTL, &reg);
5006 if (rc || PCI_POSSIBLE_ERROR(reg))
5007 return false;
5008
5009 /*
5010 * Per CXL spec r3.1, sec 8.1.5.2, when "Unmask SBR" is 0, the SBR
5011 * bit in Bridge Control has no effect. When 1, the Port generates
5012 * hot reset when the SBR bit is set to 1.
5013 */
5014 if (reg & PCI_DVSEC_CXL_PORT_CTL_UNMASK_SBR)
5015 return false;
5016
5017 return true;
5018 }
5019
5020 static int pci_reset_bus_function(struct pci_dev *dev, bool probe)
5021 {
5022 struct pci_dev *bridge = pci_upstream_bridge(dev);
5023 int rc;
5024
5025 /*
5026 * If "dev" is below a CXL port that has SBR control masked, SBR
5027 * won't do anything, so return error.
5028 */
5029 if (bridge && cxl_sbr_masked(bridge)) {
5030 if (probe)
5031 return 0;
5032
5033 return -ENOTTY;
5034 }
5035
5036 rc = pci_dev_reset_slot_function(dev, probe);
5037 if (rc != -ENOTTY)
5038 return rc;
5039 return pci_parent_bus_reset(dev, probe);
5040 }
5041
5042 static int cxl_reset_bus_function(struct pci_dev *dev, bool probe)
5043 {
5044 struct pci_dev *bridge;
5045 u16 dvsec, reg, val;
5046 int rc;
5047
5048 bridge = pci_upstream_bridge(dev);
5049 if (!bridge)
5050 return -ENOTTY;
5051
5052 dvsec = cxl_port_dvsec(bridge);
5053 if (!dvsec)
5054 return -ENOTTY;
5055
5056 if (probe)
5057 return 0;
5058
5059 rc = pci_read_config_word(bridge, dvsec + PCI_DVSEC_CXL_PORT_CTL, &reg);
5060 if (rc)
5061 return -ENOTTY;
5062
5063 if (reg & PCI_DVSEC_CXL_PORT_CTL_UNMASK_SBR) {
5064 val = reg;
5065 } else {
5066 val = reg | PCI_DVSEC_CXL_PORT_CTL_UNMASK_SBR;
5067 pci_write_config_word(bridge, dvsec + PCI_DVSEC_CXL_PORT_CTL,
5068 val);
5069 }
5070
5071 rc = pci_reset_bus_function(dev, probe);
5072
5073 if (reg != val)
5074 pci_write_config_word(bridge, dvsec + PCI_DVSEC_CXL_PORT_CTL,
5075 reg);
5076
5077 return rc;
5078 }
5079
5080 void pci_dev_lock(struct pci_dev *dev)
5081 {
5082 /* block PM suspend, driver probe, etc. */
5083 device_lock(&dev->dev);
5084 pci_cfg_access_lock(dev);
5085 }
5086 EXPORT_SYMBOL_GPL(pci_dev_lock);
5087
5088 /* Return 1 on successful lock, 0 on contention */
5089 int pci_dev_trylock(struct pci_dev *dev)
5090 {
5091 if (device_trylock(&dev->dev)) {
5092 if (pci_cfg_access_trylock(dev))
5093 return 1;
5094 device_unlock(&dev->dev);
5095 }
5096
5097 return 0;
5098 }
5099 EXPORT_SYMBOL_GPL(pci_dev_trylock);
5100
5101 void pci_dev_unlock(struct pci_dev *dev)
5102 {
5103 pci_cfg_access_unlock(dev);
5104 device_unlock(&dev->dev);
5105 }
5106 EXPORT_SYMBOL_GPL(pci_dev_unlock);
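
/*
 * Example (editorial sketch): the non-blocking pattern for callers that
 * must not sleep waiting for the device lock, mirroring how
 * pci_try_reset_function() below uses pci_dev_trylock(). The helper
 * name is illustrative.
 */
static int example_locked_op(struct pci_dev *pdev)
{
	if (!pci_dev_trylock(pdev))
		return -EAGAIN;

	/* ... touch the device with config space access blocked ... */

	pci_dev_unlock(pdev);
	return 0;
}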
5107
5108 static void pci_dev_save_and_disable(struct pci_dev *dev)
5109 {
5110 const struct pci_error_handlers *err_handler =
5111 dev->driver ? dev->driver->err_handler : NULL;
5112
5113 /*
5114 * dev->driver->err_handler->reset_prepare() is protected against
5115 * races with ->remove() by the device lock, which must be held by
5116 * the caller.
5117 */
5118 if (err_handler && err_handler->reset_prepare)
5119 err_handler->reset_prepare(dev);
5120 else if (dev->driver)
5121 pci_warn(dev, "resetting");
5122
5123 /*
5124 * Wake-up device prior to save. PM registers default to D0 after
5125 * reset and a simple register restore doesn't reliably return
5126 * to a non-D0 state anyway.
5127 */
5128 pci_set_power_state(dev, PCI_D0);
5129
5130 pci_save_state(dev);
5131 /*
5132 * Disable the device by clearing the Command register, except for
5133 * INTx-disable which is set. This not only disables MMIO and I/O port
5134 * BARs, but also prevents the device from being Bus Master, preventing
5135 * DMA from the device including MSI/MSI-X interrupts. For PCI 2.3
5136 * compliant devices, INTx-disable prevents legacy interrupts.
5137 */
5138 pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
5139 }
5140
5141 static void pci_dev_restore(struct pci_dev *dev)
5142 {
5143 const struct pci_error_handlers *err_handler =
5144 dev->driver ? dev->driver->err_handler : NULL;
5145
5146 pci_restore_state(dev);
5147
5148 /*
5149 * dev->driver->err_handler->reset_done() is protected against
5150 * races with ->remove() by the device lock, which must be held by
5151 * the caller.
5152 */
5153 if (err_handler && err_handler->reset_done)
5154 err_handler->reset_done(dev);
5155 else if (dev->driver)
5156 pci_warn(dev, "reset done");
5157 }
5158
5159 /* dev->reset_methods[] is a 0-terminated list of indices into this array */
5160 const struct pci_reset_fn_method pci_reset_fn_methods[] = {
5161 { },
5162 { pci_dev_specific_reset, .name = "device_specific" },
5163 { pci_dev_acpi_reset, .name = "acpi" },
5164 { pcie_reset_flr, .name = "flr" },
5165 { pci_af_flr, .name = "af_flr" },
5166 { pci_pm_reset, .name = "pm" },
5167 { pci_reset_bus_function, .name = "bus" },
5168 { cxl_reset_bus_function, .name = "cxl_bus" },
5169 };
5170
5171 /**
5172 * __pci_reset_function_locked - reset a PCI device function while holding
5173 * the @dev mutex lock.
5174 * @dev: PCI device to reset
5175 *
5176 * Some devices allow an individual function to be reset without affecting
5177 * other functions in the same device. The PCI device must be responsive
5178 * to PCI config space in order to use this function.
5179 *
5180 * The device function is presumed to be unused and the caller is holding
5181 * the device mutex lock when this function is called.
5182 *
5183 * Resetting the device will make the contents of PCI configuration space
5184 * random, so any caller of this must be prepared to reinitialise the
5185 * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
5186 * etc.
5187 *
5188 * Returns 0 if the device function was successfully reset or negative if the
5189 * device doesn't support resetting a single function.
5190 */
5191 int __pci_reset_function_locked(struct pci_dev *dev)
5192 {
5193 int i, m, rc;
5194 const struct pci_reset_fn_method *method;
5195
5196 might_sleep();
5197
5198 /*
5199 * A reset method returns -ENOTTY if it doesn't support this device and
5200 * we should try the next method.
5201 *
5202 * If it returns 0 (success), we're finished. If it returns any other
5203 * error, we're also finished: this indicates that further reset
5204 * mechanisms might be broken on the device.
5205 */
5206 for (i = 0; i < PCI_NUM_RESET_METHODS; i++) {
5207 m = dev->reset_methods[i];
5208 if (!m)
5209 return -ENOTTY;
5210
5211 method = &pci_reset_fn_methods[m];
5212 pci_dbg(dev, "reset via %s\n", method->name);
5213 rc = method->reset_fn(dev, PCI_RESET_DO_RESET);
5214 if (!rc)
5215 return 0;
5216
5217 pci_dbg(dev, "%s failed with %d\n", method->name, rc);
5218 if (rc != -ENOTTY)
5219 return rc;
5220 }
5221
5222 return -ENOTTY;
5223 }
5224 EXPORT_SYMBOL_GPL(__pci_reset_function_locked);
5225
5226 /**
5227 * pci_init_reset_methods - check whether device can be safely reset
5228 * and store supported reset mechanisms.
5229 * @dev: PCI device to check for reset mechanisms
5230 *
5231 * Some devices allow an individual function to be reset without affecting
5232 * other functions in the same device. The PCI device must be in D0-D3hot
5233 * state.
5234 *
5235 * Stores reset mechanisms supported by device in reset_methods byte array
5236 * which is a member of struct pci_dev.
5237 */
5238 void pci_init_reset_methods(struct pci_dev *dev)
5239 {
5240 int m, i, rc;
5241
5242 BUILD_BUG_ON(ARRAY_SIZE(pci_reset_fn_methods) != PCI_NUM_RESET_METHODS);
5243
5244 might_sleep();
5245
5246 i = 0;
5247 for (m = 1; m < PCI_NUM_RESET_METHODS; m++) {
5248 rc = pci_reset_fn_methods[m].reset_fn(dev, PCI_RESET_PROBE);
5249 if (!rc)
5250 dev->reset_methods[i++] = m;
5251 else if (rc != -ENOTTY)
5252 break;
5253 }
5254
5255 dev->reset_methods[i] = 0;
5256 }
5257
5258 /**
5259 * pci_reset_function - quiesce and reset a PCI device function
5260 * @dev: PCI device to reset
5261 *
5262 * Some devices allow an individual function to be reset without affecting
5263 * other functions in the same device. The PCI device must be responsive
5264 * to PCI config space in order to use this function.
5265 *
5266 * This function does not just reset the PCI portion of a device, but
5267 * clears all the state associated with the device. This function differs
5268 * from __pci_reset_function_locked() in that it saves and restores device state
5269 * over the reset and takes the PCI device lock.
5270 *
5271 * Returns 0 if the device function was successfully reset or negative if the
5272 * device doesn't support resetting a single function.
5273 */
5274 int pci_reset_function(struct pci_dev *dev)
5275 {
5276 struct pci_dev *bridge;
5277 int rc;
5278
5279 if (!pci_reset_supported(dev))
5280 return -ENOTTY;
5281
5282 /*
5283 * If there's no upstream bridge, no locking is needed since there is
5284 * no upstream bridge configuration to hold consistent.
5285 */
5286 bridge = pci_upstream_bridge(dev);
5287 if (bridge)
5288 pci_dev_lock(bridge);
5289
5290 pci_dev_lock(dev);
5291 pci_dev_save_and_disable(dev);
5292
5293 rc = __pci_reset_function_locked(dev);
5294
5295 pci_dev_restore(dev);
5296 pci_dev_unlock(dev);
5297
5298 if (bridge)
5299 pci_dev_unlock(bridge);
5300
5301 return rc;
5302 }
5303 EXPORT_SYMBOL_GPL(pci_reset_function);
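
/*
 * Example (editor's illustrative sketch, not part of the original file):
 * a driver recovering a hung device with a function-level reset. The
 * helper mydrv_reinit_hw() is hypothetical; pci_reset_function() takes
 * the device lock and saves/restores config space around the reset.
 */
#if 0
static int mydrv_recover(struct pci_dev *pdev)
{
	int ret;

	if (!pci_reset_supported(pdev))
		return -ENOTTY;			/* no reset method probed */

	ret = pci_reset_function(pdev);
	if (ret)
		return ret;

	return mydrv_reinit_hw(pdev);		/* hypothetical HW reinit */
}
#endif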
5304
5305 /**
5306 * pci_reset_function_locked - quiesce and reset a PCI device function
5307 * @dev: PCI device to reset
5308 *
5309 * Some devices allow an individual function to be reset without affecting
5310 * other functions in the same device. The PCI device must be responsive
5311 * to PCI config space in order to use this function.
5312 *
5313 * This function does not just reset the PCI portion of a device, but
5314 * clears all the state associated with the device. This function differs
5315 * from __pci_reset_function_locked() in that it saves and restores device state
5316 * over the reset. It also differs from pci_reset_function() in that it
5317 * requires the PCI device lock to be held.
5318 *
5319 * Returns 0 if the device function was successfully reset or negative if the
5320 * device doesn't support resetting a single function.
5321 */
5322 int pci_reset_function_locked(struct pci_dev *dev)
5323 {
5324 int rc;
5325
5326 if (!pci_reset_supported(dev))
5327 return -ENOTTY;
5328
5329 pci_dev_save_and_disable(dev);
5330
5331 rc = __pci_reset_function_locked(dev);
5332
5333 pci_dev_restore(dev);
5334
5335 return rc;
5336 }
5337 EXPORT_SYMBOL_GPL(pci_reset_function_locked);
5338
5339 /**
5340 * pci_try_reset_function - quiesce and reset a PCI device function
5341 * @dev: PCI device to reset
5342 *
5343 * Same as above, except return -EAGAIN if unable to lock device.
5344 */
5345 int pci_try_reset_function(struct pci_dev *dev)
5346 {
5347 int rc;
5348
5349 if (!pci_reset_supported(dev))
5350 return -ENOTTY;
5351
5352 if (!pci_dev_trylock(dev))
5353 return -EAGAIN;
5354
5355 pci_dev_save_and_disable(dev);
5356 rc = __pci_reset_function_locked(dev);
5357 pci_dev_restore(dev);
5358 pci_dev_unlock(dev);
5359
5360 return rc;
5361 }
5362 EXPORT_SYMBOL_GPL(pci_try_reset_function);
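
/*
 * Example (editor's sketch, assuming a hypothetical mydrv): a non-blocking
 * caller can back off and retry when pci_try_reset_function() reports
 * device-lock contention with -EAGAIN.
 */
#if 0
static int mydrv_try_recover(struct pci_dev *pdev)
{
	int i, ret;

	for (i = 0; i < 5; i++) {
		ret = pci_try_reset_function(pdev);
		if (ret != -EAGAIN)
			return ret;
		msleep(100);	/* device lock contended, back off */
	}
	return -EAGAIN;
}
#endif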
5363
5364 /* Do any devices on or below this bus prevent a bus reset? */
5365 static bool pci_bus_resettable(struct pci_bus *bus)
5366 {
5367 struct pci_dev *dev;
5368
5370 if (bus->self && (bus->self->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET))
5371 return false;
5372
5373 list_for_each_entry(dev, &bus->devices, bus_list) {
5374 if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
5375 (dev->subordinate && !pci_bus_resettable(dev->subordinate)))
5376 return false;
5377 }
5378
5379 return true;
5380 }
5381
5382 /* Lock devices from the top of the tree down */
5383 static void pci_bus_lock(struct pci_bus *bus)
5384 {
5385 struct pci_dev *dev;
5386
5387 pci_dev_lock(bus->self);
5388 list_for_each_entry(dev, &bus->devices, bus_list) {
5389 if (dev->subordinate)
5390 pci_bus_lock(dev->subordinate);
5391 else
5392 pci_dev_lock(dev);
5393 }
5394 }
5395
5396 /* Unlock devices from the bottom of the tree up */
5397 static void pci_bus_unlock(struct pci_bus *bus)
5398 {
5399 struct pci_dev *dev;
5400
5401 list_for_each_entry(dev, &bus->devices, bus_list) {
5402 if (dev->subordinate)
5403 pci_bus_unlock(dev->subordinate);
5404 else
5405 pci_dev_unlock(dev);
5406 }
5407 pci_dev_unlock(bus->self);
5408 }
5409
5410 /* Return 1 on successful lock, 0 on contention */
5411 static int pci_bus_trylock(struct pci_bus *bus)
5412 {
5413 struct pci_dev *dev;
5414
5415 if (!pci_dev_trylock(bus->self))
5416 return 0;
5417
5418 list_for_each_entry(dev, &bus->devices, bus_list) {
5419 if (dev->subordinate) {
5420 if (!pci_bus_trylock(dev->subordinate))
5421 goto unlock;
5422 } else if (!pci_dev_trylock(dev))
5423 goto unlock;
5424 }
5425 return 1;
5426
5427 unlock:
5428 list_for_each_entry_continue_reverse(dev, &bus->devices, bus_list) {
5429 if (dev->subordinate)
5430 pci_bus_unlock(dev->subordinate);
5431 else
5432 pci_dev_unlock(dev);
5433 }
5434 pci_dev_unlock(bus->self);
5435 return 0;
5436 }
5437
5438 /* Do any devices on or below this slot prevent a bus reset? */
5439 static bool pci_slot_resettable(struct pci_slot *slot)
5440 {
5441 struct pci_dev *dev;
5442
5443 if (slot->bus->self &&
5444 (slot->bus->self->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET))
5445 return false;
5446
5447 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5448 if (!dev->slot || dev->slot != slot)
5449 continue;
5450 if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
5451 (dev->subordinate && !pci_bus_resettable(dev->subordinate)))
5452 return false;
5453 }
5454
5455 return true;
5456 }
5457
5458 /* Lock devices from the top of the tree down */
5459 static void pci_slot_lock(struct pci_slot *slot)
5460 {
5461 struct pci_dev *dev;
5462
5463 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5464 if (!dev->slot || dev->slot != slot)
5465 continue;
5466 if (dev->subordinate)
5467 pci_bus_lock(dev->subordinate);
5468 else
5469 pci_dev_lock(dev);
5470 }
5471 }
5472
5473 /* Unlock devices from the bottom of the tree up */
5474 static void pci_slot_unlock(struct pci_slot *slot)
5475 {
5476 struct pci_dev *dev;
5477
5478 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5479 if (!dev->slot || dev->slot != slot)
5480 continue;
5481 if (dev->subordinate)
5482 pci_bus_unlock(dev->subordinate);
5483 else
5484 pci_dev_unlock(dev);
5485 }
5486 }
5487
5488 /* Return 1 on successful lock, 0 on contention */
5489 static int pci_slot_trylock(struct pci_slot *slot)
5490 {
5491 struct pci_dev *dev;
5492
5493 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5494 if (!dev->slot || dev->slot != slot)
5495 continue;
5496 if (dev->subordinate) {
5497 if (!pci_bus_trylock(dev->subordinate)) {
5498 pci_dev_unlock(dev);
5499 goto unlock;
5500 }
5501 } else if (!pci_dev_trylock(dev))
5502 goto unlock;
5503 }
5504 return 1;
5505
5506 unlock:
5507 list_for_each_entry_continue_reverse(dev,
5508 &slot->bus->devices, bus_list) {
5509 if (!dev->slot || dev->slot != slot)
5510 continue;
5511 if (dev->subordinate)
5512 pci_bus_unlock(dev->subordinate);
5513 else
5514 pci_dev_unlock(dev);
5515 }
5516 return 0;
5517 }
5518
5519 /*
5520 * Save and disable devices from the top of the tree down while holding
5521 * the @dev mutex lock for the entire tree.
5522 */
5523 static void pci_bus_save_and_disable_locked(struct pci_bus *bus)
5524 {
5525 struct pci_dev *dev;
5526
5527 list_for_each_entry(dev, &bus->devices, bus_list) {
5528 pci_dev_save_and_disable(dev);
5529 if (dev->subordinate)
5530 pci_bus_save_and_disable_locked(dev->subordinate);
5531 }
5532 }
5533
5534 /*
5535 * Restore devices from the top of the tree down while holding the @dev
5536 * mutex lock for the entire tree. Parent bridges need to be restored before
5537 * we can get to subordinate devices.
5538 */
5539 static void pci_bus_restore_locked(struct pci_bus *bus)
5540 {
5541 struct pci_dev *dev;
5542
5543 list_for_each_entry(dev, &bus->devices, bus_list) {
5544 pci_dev_restore(dev);
5545 if (dev->subordinate) {
5546 pci_bridge_wait_for_secondary_bus(dev, "bus reset");
5547 pci_bus_restore_locked(dev->subordinate);
5548 }
5549 }
5550 }
5551
5552 /*
5553 * Save and disable devices from the top of the tree down while holding
5554 * the @dev mutex lock for the entire tree.
5555 */
5556 static void pci_slot_save_and_disable_locked(struct pci_slot *slot)
5557 {
5558 struct pci_dev *dev;
5559
5560 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5561 if (!dev->slot || dev->slot != slot)
5562 continue;
5563 pci_dev_save_and_disable(dev);
5564 if (dev->subordinate)
5565 pci_bus_save_and_disable_locked(dev->subordinate);
5566 }
5567 }
5568
5569 /*
5570 * Restore devices from the top of the tree down while holding the @dev
5571 * mutex lock for the entire tree. Parent bridges need to be restored before
5572 * we can get to subordinate devices.
5573 */
5574 static void pci_slot_restore_locked(struct pci_slot *slot)
5575 {
5576 struct pci_dev *dev;
5577
5578 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5579 if (!dev->slot || dev->slot != slot)
5580 continue;
5581 pci_dev_restore(dev);
5582 if (dev->subordinate) {
5583 pci_bridge_wait_for_secondary_bus(dev, "slot reset");
5584 pci_bus_restore_locked(dev->subordinate);
5585 }
5586 }
5587 }
5588
5589 static int pci_slot_reset(struct pci_slot *slot, bool probe)
5590 {
5591 int rc;
5592
5593 if (!slot || !pci_slot_resettable(slot))
5594 return -ENOTTY;
5595
5596 if (!probe)
5597 pci_slot_lock(slot);
5598
5599 might_sleep();
5600
5601 rc = pci_reset_hotplug_slot(slot->hotplug, probe);
5602
5603 if (!probe)
5604 pci_slot_unlock(slot);
5605
5606 return rc;
5607 }
5608
5609 /**
5610 * pci_probe_reset_slot - probe whether a PCI slot can be reset
5611 * @slot: PCI slot to probe
5612 *
5613 * Return 0 if slot can be reset, negative if a slot reset is not supported.
5614 */
5615 int pci_probe_reset_slot(struct pci_slot *slot)
5616 {
5617 return pci_slot_reset(slot, PCI_RESET_PROBE);
5618 }
5619 EXPORT_SYMBOL_GPL(pci_probe_reset_slot);
5620
5621 /**
5622 * __pci_reset_slot - Try to reset a PCI slot
5623 * @slot: PCI slot to reset
5624 *
5625 * A PCI bus may host multiple slots, each slot may support a reset mechanism
5626 * independent of other slots. For instance, some slots may support slot power
5627 * control. In the case of a 1:1 bus to slot architecture, this function may
5628 * wrap the bus reset to avoid spurious slot-related events such as hotplug.
5629 * Generally a slot reset should be attempted before a bus reset. All of the
5630 * functions of the slot and any subordinate buses behind the slot are reset
5631 * through this function. PCI config space of all devices in the slot and
5632 * behind the slot is saved before and restored after reset.
5633 *
5634 * Same as above except return -EAGAIN if the slot cannot be locked
5635 */
5636 static int __pci_reset_slot(struct pci_slot *slot)
5637 {
5638 int rc;
5639
5640 rc = pci_slot_reset(slot, PCI_RESET_PROBE);
5641 if (rc)
5642 return rc;
5643
5644 if (pci_slot_trylock(slot)) {
5645 pci_slot_save_and_disable_locked(slot);
5646 might_sleep();
5647 rc = pci_reset_hotplug_slot(slot->hotplug, PCI_RESET_DO_RESET);
5648 pci_slot_restore_locked(slot);
5649 pci_slot_unlock(slot);
5650 } else
5651 rc = -EAGAIN;
5652
5653 return rc;
5654 }
5655
5656 static int pci_bus_reset(struct pci_bus *bus, bool probe)
5657 {
5658 int ret;
5659
5660 if (!bus->self || !pci_bus_resettable(bus))
5661 return -ENOTTY;
5662
5663 if (probe)
5664 return 0;
5665
5666 pci_bus_lock(bus);
5667
5668 might_sleep();
5669
5670 ret = pci_bridge_secondary_bus_reset(bus->self);
5671
5672 pci_bus_unlock(bus);
5673
5674 return ret;
5675 }
5676
5677 /**
5678 * pci_bus_error_reset - reset the bridge's subordinate bus
5679 * @bridge: The parent device that connects to the bus to reset
5680 *
5681 * This function will first try to reset the slots on this bus if the method is
5682 * available. If slot reset fails or is not available, this will fall back to a
5683 * secondary bus reset.
5684 */
5685 int pci_bus_error_reset(struct pci_dev *bridge)
5686 {
5687 struct pci_bus *bus = bridge->subordinate;
5688 struct pci_slot *slot;
5689
5690 if (!bus)
5691 return -ENOTTY;
5692
5693 mutex_lock(&pci_slot_mutex);
5694 if (list_empty(&bus->slots))
5695 goto bus_reset;
5696
5697 list_for_each_entry(slot, &bus->slots, list)
5698 if (pci_probe_reset_slot(slot))
5699 goto bus_reset;
5700
5701 list_for_each_entry(slot, &bus->slots, list)
5702 if (pci_slot_reset(slot, PCI_RESET_DO_RESET))
5703 goto bus_reset;
5704
5705 mutex_unlock(&pci_slot_mutex);
5706 return 0;
5707 bus_reset:
5708 mutex_unlock(&pci_slot_mutex);
5709 return pci_bus_reset(bridge->subordinate, PCI_RESET_DO_RESET);
5710 }
5711
5712 /**
5713 * pci_probe_reset_bus - probe whether a PCI bus can be reset
5714 * @bus: PCI bus to probe
5715 *
5716 * Return 0 if bus can be reset, negative if a bus reset is not supported.
5717 */
5718 int pci_probe_reset_bus(struct pci_bus *bus)
5719 {
5720 return pci_bus_reset(bus, PCI_RESET_PROBE);
5721 }
5722 EXPORT_SYMBOL_GPL(pci_probe_reset_bus);
5723
5724 /**
5725 * __pci_reset_bus - Try to reset a PCI bus
5726 * @bus: top level PCI bus to reset
5727 *
5728 * Same as above except return -EAGAIN if the bus cannot be locked
5729 */
5730 int __pci_reset_bus(struct pci_bus *bus)
5731 {
5732 int rc;
5733
5734 rc = pci_bus_reset(bus, PCI_RESET_PROBE);
5735 if (rc)
5736 return rc;
5737
5738 if (pci_bus_trylock(bus)) {
5739 pci_bus_save_and_disable_locked(bus);
5740 might_sleep();
5741 rc = pci_bridge_secondary_bus_reset(bus->self);
5742 pci_bus_restore_locked(bus);
5743 pci_bus_unlock(bus);
5744 } else
5745 rc = -EAGAIN;
5746
5747 return rc;
5748 }
5749
5750 /**
5751 * pci_reset_bus - Try to reset a PCI bus
5752 * @pdev: top level PCI device to reset via slot/bus
5753 *
5754 * Same as above except return -EAGAIN if the bus cannot be locked
5755 */
5756 int pci_reset_bus(struct pci_dev *pdev)
5757 {
5758 return (!pci_probe_reset_slot(pdev->slot)) ?
5759 __pci_reset_slot(pdev->slot) : __pci_reset_bus(pdev->bus);
5760 }
5761 EXPORT_SYMBOL_GPL(pci_reset_bus);
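
/*
 * Example (editor's sketch): resetting the whole topology below a device,
 * as a device-assignment backend might. pci_reset_bus() itself prefers a
 * slot reset when the slot supports one.
 */
#if 0
static int mydrv_reset_topology(struct pci_dev *pdev)
{
	if (pci_probe_reset_bus(pdev->bus))
		return -ENOTTY;		/* no usable bus reset method */

	return pci_reset_bus(pdev);	/* -EAGAIN if the tree is contended */
}
#endif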
5762
5763 /**
5764 * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count
5765 * @dev: PCI device to query
5766 *
5767 * Returns mmrbc: maximum designed memory read count in bytes or
5768 * appropriate error value.
5769 */
5770 int pcix_get_max_mmrbc(struct pci_dev *dev)
5771 {
5772 int cap;
5773 u32 stat;
5774
5775 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
5776 if (!cap)
5777 return -EINVAL;
5778
5779 if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
5780 return -EINVAL;
5781
5782 return 512 << FIELD_GET(PCI_X_STATUS_MAX_READ, stat);
5783 }
5784 EXPORT_SYMBOL(pcix_get_max_mmrbc);
5785
5786 /**
5787 * pcix_get_mmrbc - get PCI-X maximum memory read byte count
5788 * @dev: PCI device to query
5789 *
5790 * Returns mmrbc: maximum memory read count in bytes or appropriate error
5791 * value.
5792 */
5793 int pcix_get_mmrbc(struct pci_dev *dev)
5794 {
5795 int cap;
5796 u16 cmd;
5797
5798 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
5799 if (!cap)
5800 return -EINVAL;
5801
5802 if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
5803 return -EINVAL;
5804
5805 return 512 << FIELD_GET(PCI_X_CMD_MAX_READ, cmd);
5806 }
5807 EXPORT_SYMBOL(pcix_get_mmrbc);
5808
5809 /**
5810 * pcix_set_mmrbc - set PCI-X maximum memory read byte count
5811 * @dev: PCI device to configure
5812 * @mmrbc: maximum memory read count in bytes
5813 * valid values are 512, 1024, 2048, 4096
5814 *
5815 * If possible, sets the maximum memory read byte count; some bridges have
5816 * errata that prevent this.
5817 */
5818 int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
5819 {
5820 int cap;
5821 u32 stat, v, o;
5822 u16 cmd;
5823
5824 if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc))
5825 return -EINVAL;
5826
5827 v = ffs(mmrbc) - 10;
5828
5829 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
5830 if (!cap)
5831 return -EINVAL;
5832
5833 if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
5834 return -EINVAL;
5835
5836 if (v > FIELD_GET(PCI_X_STATUS_MAX_READ, stat))
5837 return -E2BIG;
5838
5839 if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
5840 return -EINVAL;
5841
5842 o = FIELD_GET(PCI_X_CMD_MAX_READ, cmd);
5843 if (o != v) {
5844 if (v > o && (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC))
5845 return -EIO;
5846
5847 cmd &= ~PCI_X_CMD_MAX_READ;
5848 cmd |= FIELD_PREP(PCI_X_CMD_MAX_READ, v);
5849 if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd))
5850 return -EIO;
5851 }
5852 return 0;
5853 }
5854 EXPORT_SYMBOL(pcix_set_mmrbc);
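
/*
 * Example (editor's sketch): raising a PCI-X device's memory read byte
 * count to its designed maximum. Both getters return a byte count that is
 * a power of two in [512, 4096], or a negative errno.
 */
#if 0
static void mydrv_tune_pcix(struct pci_dev *pdev)
{
	int max = pcix_get_max_mmrbc(pdev);
	int cur = pcix_get_mmrbc(pdev);

	if (max > 0 && cur > 0 && cur < max)
		pcix_set_mmrbc(pdev, max);	/* may fail on buggy bridges */
}
#endif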
5855
5856 /**
5857 * pcie_get_readrq - get PCI Express read request size
5858 * @dev: PCI device to query
5859 *
5860 * Returns maximum memory read request in bytes or appropriate error value.
5861 */
5862 int pcie_get_readrq(struct pci_dev *dev)
5863 {
5864 u16 ctl;
5865
5866 pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
5867
5868 return 128 << FIELD_GET(PCI_EXP_DEVCTL_READRQ, ctl);
5869 }
5870 EXPORT_SYMBOL(pcie_get_readrq);
5871
5872 /**
5873 * pcie_set_readrq - set PCI Express maximum memory read request
5874 * @dev: PCI device to configure
5875 * @rq: maximum memory read count in bytes
5876 * valid values are 128, 256, 512, 1024, 2048, 4096
5877 *
5878 * If possible, sets the maximum memory read request size in bytes.
5879 */
5880 int pcie_set_readrq(struct pci_dev *dev, int rq)
5881 {
5882 u16 v;
5883 int ret;
5884 unsigned int firstbit;
5885 struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);
5886
5887 if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
5888 return -EINVAL;
5889
5890 /*
5891 * If using the "performance" PCIe config, we clamp the read rq
5892 * size to the max packet size to keep the host bridge from
5893 * generating requests larger than we can cope with.
5894 */
5895 if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
5896 int mps = pcie_get_mps(dev);
5897
5898 if (mps < rq)
5899 rq = mps;
5900 }
5901
5902 firstbit = ffs(rq);
5903 if (firstbit < 8)
5904 return -EINVAL;
5905 v = FIELD_PREP(PCI_EXP_DEVCTL_READRQ, firstbit - 8);
5906
5907 if (bridge->no_inc_mrrs) {
5908 int max_mrrs = pcie_get_readrq(dev);
5909
5910 if (rq > max_mrrs) {
5911 pci_info(dev, "can't set Max_Read_Request_Size to %d; max is %d\n", rq, max_mrrs);
5912 return -EINVAL;
5913 }
5914 }
5915
5916 ret = pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
5917 PCI_EXP_DEVCTL_READRQ, v);
5918
5919 return pcibios_err_to_errno(ret);
5920 }
5921 EXPORT_SYMBOL(pcie_set_readrq);
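
/*
 * Example (editor's sketch): pinning MRRS to 512 bytes, a common choice for
 * fairness-sensitive workloads. The value must be a power of two in
 * [128, 4096] and may be clamped under the "performance" bus config.
 */
#if 0
static int mydrv_set_mrrs(struct pci_dev *pdev)
{
	int ret = pcie_set_readrq(pdev, 512);

	if (ret)
		return ret;
	pci_dbg(pdev, "MRRS is now %d bytes\n", pcie_get_readrq(pdev));
	return 0;
}
#endif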
5922
5923 /**
5924 * pcie_get_mps - get PCI Express maximum payload size
5925 * @dev: PCI device to query
5926 *
5927 * Returns maximum payload size in bytes
5928 */
5929 int pcie_get_mps(struct pci_dev *dev)
5930 {
5931 u16 ctl;
5932
5933 pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
5934
5935 return 128 << FIELD_GET(PCI_EXP_DEVCTL_PAYLOAD, ctl);
5936 }
5937 EXPORT_SYMBOL(pcie_get_mps);
5938
5939 /**
5940 * pcie_set_mps - set PCI Express maximum payload size
5941 * @dev: PCI device to configure
5942 * @mps: maximum payload size in bytes
5943 * valid values are 128, 256, 512, 1024, 2048, 4096
5944 *
5945 * If possible, sets the maximum payload size in bytes.
5946 */
5947 int pcie_set_mps(struct pci_dev *dev, int mps)
5948 {
5949 u16 v;
5950 int ret;
5951
5952 if (mps < 128 || mps > 4096 || !is_power_of_2(mps))
5953 return -EINVAL;
5954
5955 v = ffs(mps) - 8;
5956 if (v > dev->pcie_mpss)
5957 return -EINVAL;
5958 v = FIELD_PREP(PCI_EXP_DEVCTL_PAYLOAD, v);
5959
5960 ret = pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
5961 PCI_EXP_DEVCTL_PAYLOAD, v);
5962
5963 return pcibios_err_to_errno(ret);
5964 }
5965 EXPORT_SYMBOL(pcie_set_mps);
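
/*
 * Example (editor's sketch): programming the largest payload the device
 * advertises. MPS is normally negotiated by the PCI core at enumeration;
 * pdev->pcie_mpss encodes the supported maximum as 128 << pcie_mpss bytes.
 */
#if 0
static int mydrv_max_mps(struct pci_dev *pdev)
{
	return pcie_set_mps(pdev, 128 << pdev->pcie_mpss);
}
#endif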
5966
5967 static enum pci_bus_speed to_pcie_link_speed(u16 lnksta)
5968 {
5969 return pcie_link_speed[FIELD_GET(PCI_EXP_LNKSTA_CLS, lnksta)];
5970 }
5971
5972 int pcie_link_speed_mbps(struct pci_dev *pdev)
5973 {
5974 u16 lnksta;
5975 int err;
5976
5977 err = pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnksta);
5978 if (err)
5979 return err;
5980
5981 return pcie_dev_speed_mbps(to_pcie_link_speed(lnksta));
5982 }
5983 EXPORT_SYMBOL(pcie_link_speed_mbps);
5984
5985 /**
5986 * pcie_bandwidth_available - determine minimum link settings of a PCIe
5987 * device and its bandwidth limitation
5988 * @dev: PCI device to query
5989 * @limiting_dev: storage for device causing the bandwidth limitation
5990 * @speed: storage for speed of limiting device
5991 * @width: storage for width of limiting device
5992 *
5993 * Walk up the PCI device chain and find the point where the minimum
5994 * bandwidth is available. Return the bandwidth available there and (if
5995 * limiting_dev, speed, and width pointers are supplied) information about
5996 * that point. The bandwidth returned is in Mb/s, i.e., megabits/second of
5997 * raw bandwidth.
5998 */
5999 u32 pcie_bandwidth_available(struct pci_dev *dev, struct pci_dev **limiting_dev,
6000 enum pci_bus_speed *speed,
6001 enum pcie_link_width *width)
6002 {
6003 u16 lnksta;
6004 enum pci_bus_speed next_speed;
6005 enum pcie_link_width next_width;
6006 u32 bw, next_bw;
6007
6008 if (speed)
6009 *speed = PCI_SPEED_UNKNOWN;
6010 if (width)
6011 *width = PCIE_LNK_WIDTH_UNKNOWN;
6012
6013 bw = 0;
6014
6015 while (dev) {
6016 pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
6017
6018 next_speed = to_pcie_link_speed(lnksta);
6019 next_width = FIELD_GET(PCI_EXP_LNKSTA_NLW, lnksta);
6020
6021 next_bw = next_width * PCIE_SPEED2MBS_ENC(next_speed);
6022
6023 /* Check if current device limits the total bandwidth */
6024 if (!bw || next_bw <= bw) {
6025 bw = next_bw;
6026
6027 if (limiting_dev)
6028 *limiting_dev = dev;
6029 if (speed)
6030 *speed = next_speed;
6031 if (width)
6032 *width = next_width;
6033 }
6034
6035 dev = pci_upstream_bridge(dev);
6036 }
6037
6038 return bw;
6039 }
6040 EXPORT_SYMBOL(pcie_bandwidth_available);
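
/*
 * Example (editor's sketch): logging the usable bandwidth and whether an
 * upstream link, rather than the device's own link, is the bottleneck.
 */
#if 0
static void mydrv_check_bw(struct pci_dev *pdev)
{
	enum pci_bus_speed speed;
	enum pcie_link_width width;
	struct pci_dev *limit = NULL;
	u32 bw = pcie_bandwidth_available(pdev, &limit, &speed, &width);

	pci_info(pdev, "%u.%03u Gb/s available on a x%d link%s\n",
		 bw / 1000, bw % 1000, width,
		 (limit && limit != pdev) ? " (limited upstream)" : "");
}
#endif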
6041
6042 /**
6043 * pcie_get_supported_speeds - query Supported Link Speeds Vector
6044 * @dev: PCI device to query
6045 *
6046 * Query @dev supported link speeds.
6047 *
6048 * Implementation Note in PCIe r6.0 sec 7.5.3.18 recommends determining
6049 * supported link speeds using the Supported Link Speeds Vector in the Link
6050 * Capabilities 2 Register (when available).
6051 *
6052 * Link Capabilities 2 was added in PCIe r3.0, sec 7.8.18.
6053 *
6054 * Without Link Capabilities 2, i.e., prior to PCIe r3.0, Supported Link
6055 * Speeds field in Link Capabilities is used and only 2.5 GT/s and 5.0 GT/s
6056 * speeds were defined.
6057 *
6058 * For @dev without Supported Link Speed Vector, the field is synthesized
6059 * from the Max Link Speed field in the Link Capabilities Register.
6060 *
6061 * Return: Supported Link Speeds Vector (+ reserved 0 at LSB).
6062 */
6063 u8 pcie_get_supported_speeds(struct pci_dev *dev)
6064 {
6065 u32 lnkcap2, lnkcap;
6066 u8 speeds;
6067
6068 /*
6069 * Speeds retain the reserved 0 at LSB before PCIe Supported Link
6070 * Speeds Vector to allow using SLS Vector bit defines directly.
6071 */
6072 pcie_capability_read_dword(dev, PCI_EXP_LNKCAP2, &lnkcap2);
6073 speeds = lnkcap2 & PCI_EXP_LNKCAP2_SLS;
6074
6075 /* Ignore speeds higher than Max Link Speed */
6076 pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
6077 speeds &= GENMASK(lnkcap & PCI_EXP_LNKCAP_SLS, 0);
6078
6079 /* PCIe r3.0-compliant */
6080 if (speeds)
6081 return speeds;
6082
6083 /* Synthesize from the Max Link Speed field */
6084 if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_5_0GB)
6085 speeds = PCI_EXP_LNKCAP2_SLS_5_0GB | PCI_EXP_LNKCAP2_SLS_2_5GB;
6086 else if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_2_5GB)
6087 speeds = PCI_EXP_LNKCAP2_SLS_2_5GB;
6088
6089 return speeds;
6090 }
6091
6092 /**
6093 * pcie_get_speed_cap - query for the PCI device's link speed capability
6094 * @dev: PCI device to query
6095 *
6096 * Query the PCI device speed capability.
6097 *
6098 * Return: the maximum link speed supported by the device.
6099 */
6100 enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev)
6101 {
6102 return PCIE_LNKCAP2_SLS2SPEED(dev->supported_speeds);
6103 }
6104 EXPORT_SYMBOL(pcie_get_speed_cap);
6105
6106 /**
6107 * pcie_get_width_cap - query for the PCI device's link width capability
6108 * @dev: PCI device to query
6109 *
6110 * Query the PCI device width capability. Return the maximum link width
6111 * supported by the device.
6112 */
6113 enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev)
6114 {
6115 u32 lnkcap;
6116
6117 pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
6118 if (lnkcap)
6119 return FIELD_GET(PCI_EXP_LNKCAP_MLW, lnkcap);
6120
6121 return PCIE_LNK_WIDTH_UNKNOWN;
6122 }
6123 EXPORT_SYMBOL(pcie_get_width_cap);
6124
6125 /**
6126 * pcie_bandwidth_capable - calculate a PCI device's link bandwidth capability
6127 * @dev: PCI device
6128 * @speed: storage for link speed
6129 * @width: storage for link width
6130 *
6131 * Calculate a PCI device's link bandwidth by querying for its link speed
6132 * and width, multiplying them, and applying encoding overhead. The result
6133 * is in Mb/s, i.e., megabits/second of raw bandwidth.
6134 */
6135 static u32 pcie_bandwidth_capable(struct pci_dev *dev,
6136 enum pci_bus_speed *speed,
6137 enum pcie_link_width *width)
6138 {
6139 *speed = pcie_get_speed_cap(dev);
6140 *width = pcie_get_width_cap(dev);
6141
6142 if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN)
6143 return 0;
6144
6145 return *width * PCIE_SPEED2MBS_ENC(*speed);
6146 }
6147
6148 /**
6149 * __pcie_print_link_status - Report the PCI device's link speed and width
6150 * @dev: PCI device to query
6151 * @verbose: Print info even when enough bandwidth is available
6152 *
6153 * If the available bandwidth at the device is less than the device is
6154 * capable of, report the device's maximum possible bandwidth and the
6155 * upstream link that limits its performance. If @verbose, always print
6156 * the available bandwidth, even if the device isn't constrained.
6157 */
6158 void __pcie_print_link_status(struct pci_dev *dev, bool verbose)
6159 {
6160 enum pcie_link_width width, width_cap;
6161 enum pci_bus_speed speed, speed_cap;
6162 struct pci_dev *limiting_dev = NULL;
6163 u32 bw_avail, bw_cap;
6164 char *flit_mode = "";
6165
6166 bw_cap = pcie_bandwidth_capable(dev, &speed_cap, &width_cap);
6167 bw_avail = pcie_bandwidth_available(dev, &limiting_dev, &speed, &width);
6168
6169 if (dev->bus && dev->bus->flit_mode)
6170 flit_mode = ", in Flit mode";
6171
6172 if (bw_avail >= bw_cap && verbose)
6173 pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth (%s x%d link)%s\n",
6174 bw_cap / 1000, bw_cap % 1000,
6175 pci_speed_string(speed_cap), width_cap, flit_mode);
6176 else if (bw_avail < bw_cap)
6177 pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth, limited by %s x%d link at %s (capable of %u.%03u Gb/s with %s x%d link)%s\n",
6178 bw_avail / 1000, bw_avail % 1000,
6179 pci_speed_string(speed), width,
6180 limiting_dev ? pci_name(limiting_dev) : "<unknown>",
6181 bw_cap / 1000, bw_cap % 1000,
6182 pci_speed_string(speed_cap), width_cap, flit_mode);
6183 }
6184
6185 /**
6186 * pcie_print_link_status - Report the PCI device's link speed and width
6187 * @dev: PCI device to query
6188 *
6189 * Report the available bandwidth at the device.
6190 */
6191 void pcie_print_link_status(struct pci_dev *dev)
6192 {
6193 __pcie_print_link_status(dev, true);
6194 }
6195 EXPORT_SYMBOL(pcie_print_link_status);
6196
6197 /**
6198 * pci_select_bars - Make BAR mask from the type of resource
6199 * @dev: the PCI device for which BAR mask is made
6200 * @flags: resource type mask to be selected
6201 *
6202 * This helper routine makes a BAR mask from the type of resource.
6203 */
6204 int pci_select_bars(struct pci_dev *dev, unsigned long flags)
6205 {
6206 int i, bars = 0;

6207 for (i = 0; i < PCI_NUM_RESOURCES; i++)
6208 if (pci_resource_flags(dev, i) & flags)
6209 bars |= (1 << i);
6210 return bars;
6211 }
6212 EXPORT_SYMBOL(pci_select_bars);
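
/*
 * Example (editor's sketch): requesting only the MMIO BARs of a device by
 * feeding the selected mask to pci_request_selected_regions().
 */
#if 0
static int mydrv_request_mem_bars(struct pci_dev *pdev)
{
	int bars = pci_select_bars(pdev, IORESOURCE_MEM);

	return pci_request_selected_regions(pdev, bars, "mydrv");
}
#endif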
6213
6214 /* Some architectures require additional programming to enable VGA */
6215 static arch_set_vga_state_t arch_set_vga_state;
6216
6217 void __init pci_register_set_vga_state(arch_set_vga_state_t func)
6218 {
6219 arch_set_vga_state = func; /* NULL disables */
6220 }
6221
6222 static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
6223 unsigned int command_bits, u32 flags)
6224 {
6225 if (arch_set_vga_state)
6226 return arch_set_vga_state(dev, decode, command_bits,
6227 flags);
6228 return 0;
6229 }
6230
6231 /**
6232 * pci_set_vga_state - set VGA decode state on device and parents if requested
6233 * @dev: the PCI device
6234 * @decode: true = enable decoding, false = disable decoding
6235 * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY
6236 * @flags: traverse ancestors and change bridges
6237 * CHANGE_BRIDGE_ONLY / CHANGE_BRIDGE
6238 */
6239 int pci_set_vga_state(struct pci_dev *dev, bool decode,
6240 unsigned int command_bits, u32 flags)
6241 {
6242 struct pci_bus *bus;
6243 struct pci_dev *bridge;
6244 u16 cmd;
6245 int rc;
6246
6247 WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) && (command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY)));
6248
6249 /* ARCH specific VGA enables */
6250 rc = pci_set_vga_state_arch(dev, decode, command_bits, flags);
6251 if (rc)
6252 return rc;
6253
6254 if (flags & PCI_VGA_STATE_CHANGE_DECODES) {
6255 pci_read_config_word(dev, PCI_COMMAND, &cmd);
6256 if (decode)
6257 cmd |= command_bits;
6258 else
6259 cmd &= ~command_bits;
6260 pci_write_config_word(dev, PCI_COMMAND, cmd);
6261 }
6262
6263 if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE))
6264 return 0;
6265
6266 bus = dev->bus;
6267 while (bus) {
6268 bridge = bus->self;
6269 if (bridge) {
6270 pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
6271 &cmd);
6272 if (decode)
6273 cmd |= PCI_BRIDGE_CTL_VGA;
6274 else
6275 cmd &= ~PCI_BRIDGE_CTL_VGA;
6276 pci_write_config_word(bridge, PCI_BRIDGE_CONTROL,
6277 cmd);
6278 }
6279 bus = bus->parent;
6280 }
6281 return 0;
6282 }
6283
6284 #ifdef CONFIG_ACPI
6285 bool pci_pr3_present(struct pci_dev *pdev)
6286 {
6287 struct acpi_device *adev;
6288
6289 if (acpi_disabled)
6290 return false;
6291
6292 adev = ACPI_COMPANION(&pdev->dev);
6293 if (!adev)
6294 return false;
6295
6296 return adev->power.flags.power_resources &&
6297 acpi_has_method(adev->handle, "_PR3");
6298 }
6299 EXPORT_SYMBOL_GPL(pci_pr3_present);
6300 #endif
6301
6302 /**
6303 * pci_add_dma_alias - Add a DMA devfn alias for a device
6304 * @dev: the PCI device for which alias is added
6305 * @devfn_from: alias slot and function
6306 * @nr_devfns: number of subsequent devfns to alias
6307 *
6308 * This helper encodes an 8-bit devfn as a bit number in dma_alias_mask
6309 * which is used to program permissible bus-devfn source addresses for DMA
6310 * requests in an IOMMU. These aliases factor into IOMMU group creation
6311 * and are useful for devices generating DMA requests beyond or different
6312 * from their logical bus-devfn. Examples include device quirks where the
6313 * device simply uses the wrong devfn, as well as non-transparent bridges
6314 * where the alias may be a proxy for devices in another domain.
6315 *
6316 * IOMMU group creation is performed during device discovery or addition,
6317 * prior to any potential DMA mapping and therefore prior to driver probing
6318 * (especially for userspace assigned devices where IOMMU group definition
6319 * cannot be left as a userspace activity). DMA aliases should therefore
6320 * be configured via quirks, such as the PCI fixup header quirk.
6321 */
6322 void pci_add_dma_alias(struct pci_dev *dev, u8 devfn_from,
6323 unsigned int nr_devfns)
6324 {
6325 int devfn_to;
6326
6327 nr_devfns = min(nr_devfns, (unsigned int)MAX_NR_DEVFNS - devfn_from);
6328 devfn_to = devfn_from + nr_devfns - 1;
6329
6330 if (!dev->dma_alias_mask)
6331 dev->dma_alias_mask = bitmap_zalloc(MAX_NR_DEVFNS, GFP_KERNEL);
6332 if (!dev->dma_alias_mask) {
6333 pci_warn(dev, "Unable to allocate DMA alias mask\n");
6334 return;
6335 }
6336
6337 bitmap_set(dev->dma_alias_mask, devfn_from, nr_devfns);
6338
6339 if (nr_devfns == 1)
6340 pci_info(dev, "Enabling fixed DMA alias to %02x.%d\n",
6341 PCI_SLOT(devfn_from), PCI_FUNC(devfn_from));
6342 else if (nr_devfns > 1)
6343 pci_info(dev, "Enabling fixed DMA alias for devfn range from %02x.%d to %02x.%d\n",
6344 PCI_SLOT(devfn_from), PCI_FUNC(devfn_from),
6345 PCI_SLOT(devfn_to), PCI_FUNC(devfn_to));
6346 }
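
/*
 * Example (editor's sketch): a header fixup quirk for a hypothetical device
 * (IDs 0x1234:0x5678) that emits DMA as function 1 of its own slot.
 */
#if 0
static void quirk_mydev_dma_alias(struct pci_dev *dev)
{
	pci_add_dma_alias(dev, PCI_DEVFN(PCI_SLOT(dev->devfn), 1), 1);
}
DECLARE_PCI_FIXUP_HEADER(0x1234, 0x5678, quirk_mydev_dma_alias);
#endif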
6347
6348 bool pci_devs_are_dma_aliases(struct pci_dev *dev1, struct pci_dev *dev2)
6349 {
6350 return (dev1->dma_alias_mask &&
6351 test_bit(dev2->devfn, dev1->dma_alias_mask)) ||
6352 (dev2->dma_alias_mask &&
6353 test_bit(dev1->devfn, dev2->dma_alias_mask)) ||
6354 pci_real_dma_dev(dev1) == dev2 ||
6355 pci_real_dma_dev(dev2) == dev1;
6356 }
6357
6358 bool pci_device_is_present(struct pci_dev *pdev)
6359 {
6360 u32 v;
6361
6362 /* Check PF if pdev is a VF, since VF Vendor/Device IDs are 0xffff */
6363 pdev = pci_physfn(pdev);
6364 if (pci_dev_is_disconnected(pdev))
6365 return false;
6366 return pci_bus_read_dev_vendor_id(pdev->bus, pdev->devfn, &v, 0);
6367 }
6368 EXPORT_SYMBOL_GPL(pci_device_is_present);
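
/*
 * Example (editor's sketch): guarding config space access against surprise
 * removal of a hotpluggable device.
 */
#if 0
static int mydrv_read_status(struct pci_dev *pdev, u16 *status)
{
	if (!pci_device_is_present(pdev))
		return -ENODEV;		/* device was surprise-removed */

	return pci_read_config_word(pdev, PCI_STATUS, status);
}
#endif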
6369
6370 void pci_ignore_hotplug(struct pci_dev *dev)
6371 {
6372 struct pci_dev *bridge = dev->bus->self;
6373
6374 dev->ignore_hotplug = 1;
6375 /* Propagate the "ignore hotplug" setting to the parent bridge. */
6376 if (bridge)
6377 bridge->ignore_hotplug = 1;
6378 }
6379 EXPORT_SYMBOL_GPL(pci_ignore_hotplug);
6380
6381 /**
6382 * pci_real_dma_dev - Get PCI DMA device for PCI device
6383 * @dev: the PCI device that may have a PCI DMA alias
6384 *
6385 * Permits the platform to provide architecture-specific functionality to
6386 * devices needing to alias DMA to another PCI device on another PCI bus. If
6387 * the PCI device is on the same bus, it is recommended to use
6388 * pci_add_dma_alias(). This is the default implementation. Architecture
6389 * implementations can override this.
6390 */
6391 struct pci_dev __weak *pci_real_dma_dev(struct pci_dev *dev)
6392 {
6393 return dev;
6394 }
6395
6396 resource_size_t __weak pcibios_default_alignment(void)
6397 {
6398 return 0;
6399 }
6400
6401 /*
6402 * Arches that don't want to expose struct resource to userland as-is in
6403 * sysfs and /proc can implement their own pci_resource_to_user().
6404 */
6405 void __weak pci_resource_to_user(const struct pci_dev *dev, int bar,
6406 const struct resource *rsrc,
6407 resource_size_t *start, resource_size_t *end)
6408 {
6409 *start = rsrc->start;
6410 *end = rsrc->end;
6411 }
6412
6413 static char *resource_alignment_param;
6414 static DEFINE_SPINLOCK(resource_alignment_lock);
6415
6416 /**
6417 * pci_specified_resource_alignment - get resource alignment specified by user.
6418 * @dev: the PCI device to get
6419 * @resize: whether or not to change resources' size when reassigning alignment
6420 *
6421 * RETURNS: Resource alignment if it is specified.
6422 * Zero if it is not specified.
6423 */
6424 static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev,
6425 bool *resize)
6426 {
6427 int align_order, count;
6428 resource_size_t align = pcibios_default_alignment();
6429 const char *p;
6430 int ret;
6431
6432 spin_lock(&resource_alignment_lock);
6433 p = resource_alignment_param;
6434 if (!p || !*p)
6435 goto out;
6436 if (pci_has_flag(PCI_PROBE_ONLY)) {
6437 align = 0;
6438 pr_info_once("PCI: Ignoring requested alignments (PCI_PROBE_ONLY)\n");
6439 goto out;
6440 }
6441
6442 while (*p) {
6443 count = 0;
6444 if (sscanf(p, "%d%n", &align_order, &count) == 1 &&
6445 p[count] == '@') {
6446 p += count + 1;
6447 if (align_order > 63) {
6448 pr_err("PCI: Invalid requested alignment (order %d)\n",
6449 align_order);
6450 align_order = PAGE_SHIFT;
6451 }
6452 } else {
6453 align_order = PAGE_SHIFT;
6454 }
6455
6456 ret = pci_dev_str_match(dev, p, &p);
6457 if (ret == 1) {
6458 *resize = true;
6459 align = 1ULL << align_order;
6460 break;
6461 } else if (ret < 0) {
6462 pr_err("PCI: Can't parse resource_alignment parameter: %s\n",
6463 p);
6464 break;
6465 }
6466
6467 if (*p != ';' && *p != ',') {
6468 /* End of param or invalid format */
6469 break;
6470 }
6471 p++;
6472 }
6473 out:
6474 spin_unlock(&resource_alignment_lock);
6475 return align;
6476 }
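
/*
 * Example parameter strings accepted by the parser above (editor's
 * illustration; the device IDs are hypothetical):
 *
 *   pci=resource_alignment=20@pci:1234:5678	align BARs of that VID:DID
 *						to 2^20 (1 MiB)
 *   pci=resource_alignment=0000:01:00.0	page-align BARs of 01:00.0
 *
 * The same syntax can be written at runtime to
 * /sys/bus/pci/resource_alignment.
 */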
6477
6478 static void pci_request_resource_alignment(struct pci_dev *dev, int bar,
6479 resource_size_t align, bool resize)
6480 {
6481 struct resource *r = &dev->resource[bar];
6482 const char *r_name = pci_resource_name(dev, bar);
6483 resource_size_t size;
6484
6485 if (!(r->flags & IORESOURCE_MEM))
6486 return;
6487
6488 if (r->flags & IORESOURCE_PCI_FIXED) {
6489 pci_info(dev, "%s %pR: ignoring requested alignment %#llx\n",
6490 r_name, r, (unsigned long long)align);
6491 return;
6492 }
6493
6494 size = resource_size(r);
6495 if (size >= align)
6496 return;
6497
6498 /*
6499 * Increase the alignment of the resource. There are two ways we
6500 * can do this:
6501 *
6502 * 1) Increase the size of the resource. BARs are aligned on their
6503 * size, so when we reallocate space for this resource, we'll
6504 * allocate it with the larger alignment. This also prevents
6505 * assignment of any other BARs inside the alignment region, so
6506 * if we're requesting page alignment, this means no other BARs
6507 * will share the page.
6508 *
6509 * The disadvantage is that this makes the resource larger than
6510 * the hardware BAR, which may break drivers that compute things
6511 * based on the resource size, e.g., to find registers at a
6512 * fixed offset before the end of the BAR.
6513 *
6514 * 2) Retain the resource size, but use IORESOURCE_STARTALIGN and
6515 * set r->start to the desired alignment. By itself this
6516 * doesn't prevent other BARs being put inside the alignment
6517 * region, but if we realign *every* resource of every device in
6518 * the system, none of them will share an alignment region.
6519 *
6520 * When the user has requested alignment for only some devices via
6521 * the "pci=resource_alignment" argument, "resize" is true and we
6522 * use the first method. Otherwise we assume we're aligning all
6523 * devices and we use the second.
6524 */
6525
6526 pci_info(dev, "%s %pR: requesting alignment to %#llx\n",
6527 r_name, r, (unsigned long long)align);
6528
6529 if (resize) {
6530 r->start = 0;
6531 r->end = align - 1;
6532 } else {
6533 r->flags &= ~IORESOURCE_SIZEALIGN;
6534 r->flags |= IORESOURCE_STARTALIGN;
6535 resource_set_range(r, align, size);
6536 }
6537 r->flags |= IORESOURCE_UNSET;
6538 }
6539
6540 /*
6541 * This function disables memory decoding and releases memory resources
6542 * of the device specified by the kernel's boot parameter
6543 * 'pci=resource_alignment='. It also rounds up the size to the specified
6544 * alignment. Later on, the kernel will assign page-aligned memory
6545 * resources back to the device.
6546 */
6547 void pci_reassigndev_resource_alignment(struct pci_dev *dev)
6548 {
6549 int i;
6550 struct resource *r;
6551 resource_size_t align;
6552 u16 command;
6553 bool resize = false;
6554
6555 /*
6556 * VF BARs are read-only zero according to SR-IOV spec r1.1, sec
6557 * 3.4.1.11. Their resources are allocated from the space
6558 * described by the VF BARx register in the PF's SR-IOV capability.
6559 * We can't influence their alignment here.
6560 */
6561 if (dev->is_virtfn)
6562 return;
6563
6564 /* Check whether the specified device is a reassignment target */
6565 align = pci_specified_resource_alignment(dev, &resize);
6566 if (!align)
6567 return;
6568
6569 if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
6570 (dev->class >> 8) == PCI_CLASS_BRIDGE_HOST) {
6571 pci_warn(dev, "Can't reassign resources to host bridge\n");
6572 return;
6573 }
6574
6575 pci_read_config_word(dev, PCI_COMMAND, &command);
6576 command &= ~PCI_COMMAND_MEMORY;
6577 pci_write_config_word(dev, PCI_COMMAND, command);
6578
6579 for (i = 0; i <= PCI_ROM_RESOURCE; i++)
6580 pci_request_resource_alignment(dev, i, align, resize);
6581
6582 /*
6583 * Need to disable the bridge's resource windows so the
6584 * kernel can reassign new resource windows later on.
6586 */
6587 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
6588 for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
6589 r = &dev->resource[i];
6590 if (!(r->flags & IORESOURCE_MEM))
6591 continue;
6592 r->flags |= IORESOURCE_UNSET;
6593 r->end = resource_size(r) - 1;
6594 r->start = 0;
6595 }
6596 pci_disable_bridge_window(dev);
6597 }
6598 }
6599
6600 static ssize_t resource_alignment_show(const struct bus_type *bus, char *buf)
6601 {
6602 size_t count = 0;
6603
6604 spin_lock(&resource_alignment_lock);
6605 if (resource_alignment_param)
6606 count = sysfs_emit(buf, "%s\n", resource_alignment_param);
6607 spin_unlock(&resource_alignment_lock);
6608
6609 return count;
6610 }
6611
6612 static ssize_t resource_alignment_store(const struct bus_type *bus,
6613 const char *buf, size_t count)
6614 {
6615 char *param, *old, *end;
6616
6617 if (count >= (PAGE_SIZE - 1))
6618 return -EINVAL;
6619
6620 param = kstrndup(buf, count, GFP_KERNEL);
6621 if (!param)
6622 return -ENOMEM;
6623
6624 end = strchr(param, '\n');
6625 if (end)
6626 *end = '\0';
6627
6628 spin_lock(&resource_alignment_lock);
6629 old = resource_alignment_param;
6630 if (strlen(param)) {
6631 resource_alignment_param = param;
6632 } else {
6633 kfree(param);
6634 resource_alignment_param = NULL;
6635 }
6636 spin_unlock(&resource_alignment_lock);
6637
6638 kfree(old);
6639
6640 return count;
6641 }
6642
6643 static BUS_ATTR_RW(resource_alignment);
6644
6645 static int __init pci_resource_alignment_sysfs_init(void)
6646 {
6647 return bus_create_file(&pci_bus_type,
6648 &bus_attr_resource_alignment);
6649 }
6650 late_initcall(pci_resource_alignment_sysfs_init);
6651
6652 static void pci_no_domains(void)
6653 {
6654 #ifdef CONFIG_PCI_DOMAINS
6655 pci_domains_supported = 0;
6656 #endif
6657 }
6658
6659 #ifdef CONFIG_PCI_DOMAINS_GENERIC
6660 static DEFINE_IDA(pci_domain_nr_static_ida);
6661 static DEFINE_IDA(pci_domain_nr_dynamic_ida);
6662
6663 static void of_pci_reserve_static_domain_nr(void)
6664 {
6665 struct device_node *np;
6666 int domain_nr;
6667
6668 for_each_node_by_type(np, "pci") {
6669 domain_nr = of_get_pci_domain_nr(np);
6670 if (domain_nr < 0)
6671 continue;
6672 /*
6673 * Permanently allocate domain_nr in dynamic_ida to prevent
6674 * it from being allocated dynamically.
6675 */
6676 ida_alloc_range(&pci_domain_nr_dynamic_ida,
6677 domain_nr, domain_nr, GFP_KERNEL);
6678 }
6679 }
6680
6681 static int of_pci_bus_find_domain_nr(struct device *parent)
6682 {
6683 static bool static_domains_reserved = false;
6684 int domain_nr;
6685
6686 /* On the first call scan device tree for static allocations. */
6687 if (!static_domains_reserved) {
6688 of_pci_reserve_static_domain_nr();
6689 static_domains_reserved = true;
6690 }
6691
6692 if (parent) {
6693 /*
6694 * If domain is in DT, allocate it in static IDA. This
6695 * prevents duplicate static allocations in case of errors
6696 * in DT.
6697 */
6698 domain_nr = of_get_pci_domain_nr(parent->of_node);
6699 if (domain_nr >= 0)
6700 return ida_alloc_range(&pci_domain_nr_static_ida,
6701 domain_nr, domain_nr,
6702 GFP_KERNEL);
6703 }
6704
6705 /*
6706 * If domain was not specified in DT, choose a free ID from dynamic
6707 * allocations. All domain numbers from DT are permanently in
6708 * dynamic allocations to prevent assigning them to other DT nodes
6709 * without static domain.
6710 */
6711 return ida_alloc(&pci_domain_nr_dynamic_ida, GFP_KERNEL);
6712 }
6713
6714 static void of_pci_bus_release_domain_nr(struct device *parent, int domain_nr)
6715 {
6716 if (domain_nr < 0)
6717 return;
6718
6719 /* Release domain from IDA where it was allocated. */
6720 if (of_get_pci_domain_nr(parent->of_node) == domain_nr)
6721 ida_free(&pci_domain_nr_static_ida, domain_nr);
6722 else
6723 ida_free(&pci_domain_nr_dynamic_ida, domain_nr);
6724 }
6725
6726 int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent)
6727 {
6728 return acpi_disabled ? of_pci_bus_find_domain_nr(parent) :
6729 acpi_pci_bus_find_domain_nr(bus);
6730 }
6731
6732 void pci_bus_release_domain_nr(struct device *parent, int domain_nr)
6733 {
6734 if (!acpi_disabled)
6735 return;
6736 of_pci_bus_release_domain_nr(parent, domain_nr);
6737 }
6738 #endif
6739
6740 /**
6741 * pci_ext_cfg_avail - can we access extended PCI config space?
6742 *
6743 * Returns 1 if we can access PCI extended config space (offsets
6744 * greater than 0xff). This is the default implementation. Architecture
6745 * implementations can override this.
6746 */
6747 int __weak pci_ext_cfg_avail(void)
6748 {
6749 return 1;
6750 }
6751
6752 static int __init pci_setup(char *str)
6753 {
6754 while (str) {
6755 char *k = strchr(str, ',');
6756 if (k)
6757 *k++ = 0;
6758 if (*str && (str = pcibios_setup(str)) && *str) {
6759 if (!strcmp(str, "nomsi")) {
6760 pci_no_msi();
6761 } else if (!strncmp(str, "noats", 5)) {
6762 pr_info("PCIe: ATS is disabled\n");
6763 pcie_ats_disabled = true;
6764 } else if (!strcmp(str, "noaer")) {
6765 pci_no_aer();
6766 } else if (!strcmp(str, "earlydump")) {
6767 pci_early_dump = true;
6768 } else if (!strncmp(str, "realloc=", 8)) {
6769 pci_realloc_get_opt(str + 8);
6770 } else if (!strncmp(str, "realloc", 7)) {
6771 pci_realloc_get_opt("on");
6772 } else if (!strcmp(str, "nodomains")) {
6773 pci_no_domains();
6774 } else if (!strncmp(str, "noari", 5)) {
6775 pcie_ari_disabled = true;
6776 } else if (!strncmp(str, "notph", 5)) {
6777 pci_no_tph();
6778 } else if (!strncmp(str, "cbiosize=", 9)) {
6779 pci_cardbus_io_size = memparse(str + 9, &str);
6780 } else if (!strncmp(str, "cbmemsize=", 10)) {
6781 pci_cardbus_mem_size = memparse(str + 10, &str);
6782 } else if (!strncmp(str, "resource_alignment=", 19)) {
6783 resource_alignment_param = str + 19;
6784 } else if (!strncmp(str, "ecrc=", 5)) {
6785 pcie_ecrc_get_policy(str + 5);
6786 } else if (!strncmp(str, "hpiosize=", 9)) {
6787 pci_hotplug_io_size = memparse(str + 9, &str);
6788 } else if (!strncmp(str, "hpmmiosize=", 11)) {
6789 pci_hotplug_mmio_size = memparse(str + 11, &str);
6790 } else if (!strncmp(str, "hpmmioprefsize=", 15)) {
6791 pci_hotplug_mmio_pref_size = memparse(str + 15, &str);
6792 } else if (!strncmp(str, "hpmemsize=", 10)) {
6793 pci_hotplug_mmio_size = memparse(str + 10, &str);
6794 pci_hotplug_mmio_pref_size = pci_hotplug_mmio_size;
6795 } else if (!strncmp(str, "hpbussize=", 10)) {
6796 pci_hotplug_bus_size =
6797 simple_strtoul(str + 10, &str, 0);
6798 if (pci_hotplug_bus_size > 0xff)
6799 pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE;
6800 } else if (!strncmp(str, "pcie_bus_tune_off", 17)) {
6801 pcie_bus_config = PCIE_BUS_TUNE_OFF;
6802 } else if (!strncmp(str, "pcie_bus_safe", 13)) {
6803 pcie_bus_config = PCIE_BUS_SAFE;
6804 } else if (!strncmp(str, "pcie_bus_perf", 13)) {
6805 pcie_bus_config = PCIE_BUS_PERFORMANCE;
6806 } else if (!strncmp(str, "pcie_bus_peer2peer", 18)) {
6807 pcie_bus_config = PCIE_BUS_PEER2PEER;
6808 } else if (!strncmp(str, "pcie_scan_all", 13)) {
6809 pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
6810 } else if (!strncmp(str, "disable_acs_redir=", 18)) {
6811 disable_acs_redir_param = str + 18;
6812 } else if (!strncmp(str, "config_acs=", 11)) {
6813 config_acs_param = str + 11;
6814 } else {
6815 pr_err("PCI: Unknown option `%s'\n", str);
6816 }
6817 }
6818 str = k;
6819 }
6820 return 0;
6821 }
6822 early_param("pci", pci_setup);
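
/*
 * Example command line (editor's illustration) combining several of the
 * options parsed above:
 *
 *   pci=nomsi,realloc=on,hpmemsize=64M,pcie_bus_safe
 */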
6823
6824 /*
6825 * 'resource_alignment_param', 'disable_acs_redir_param' and
6826 * 'config_acs_param' are initialized in pci_setup(), above, to point to
6827 * data in the __initdata section which will be freed after the init
6828 * sequence is complete. We can't allocate memory in pci_setup() because
6829 * some architectures do not have any memory allocation service available
6830 * during an early_param() call. So we allocate memory and copy the
6831 * variables here before the init section is freed.
6832 */
6833 static int __init pci_realloc_setup_params(void)
6834 {
6835 resource_alignment_param = kstrdup(resource_alignment_param,
6836 GFP_KERNEL);
6837 disable_acs_redir_param = kstrdup(disable_acs_redir_param, GFP_KERNEL);
6838 config_acs_param = kstrdup(config_acs_param, GFP_KERNEL);
6839
6840 return 0;
6841 }
6842 pure_initcall(pci_realloc_setup_params);
6843