1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * PCI support in ACPI
4 *
5 * Copyright (C) 2005 David Shaohua Li <shaohua.li@intel.com>
6 * Copyright (C) 2004 Tom Long Nguyen <tom.l.nguyen@intel.com>
7 * Copyright (C) 2004 Intel Corp.
8 */
9
10 #include <linux/delay.h>
11 #include <linux/init.h>
12 #include <linux/irqdomain.h>
13 #include <linux/pci.h>
14 #include <linux/msi.h>
15 #include <linux/pci_hotplug.h>
16 #include <linux/module.h>
17 #include <linux/pci-acpi.h>
18 #include <linux/pci-ecam.h>
19 #include <linux/pm_runtime.h>
20 #include <linux/pm_qos.h>
21 #include <linux/rwsem.h>
22 #include "pci.h"
23
24 /*
25 * The GUID is defined in the PCI Firmware Specification available
26 * here to PCI-SIG members:
27 * https://members.pcisig.com/wg/PCI-SIG/document/15350
28 */
29 const guid_t pci_acpi_dsm_guid =
30 GUID_INIT(0xe5c937d0, 0x3553, 0x4d7a,
31 0x91, 0x17, 0xea, 0x4d, 0x19, 0xc3, 0x43, 0x4d);
32
33 #if defined(CONFIG_PCI_QUIRKS) && defined(CONFIG_ARM64)
34 static int acpi_get_rc_addr(struct acpi_device *adev, struct resource *res)
35 {
36 struct device *dev = &adev->dev;
37 struct resource_entry *entry;
38 struct list_head list;
39 unsigned long flags;
40 int ret;
41
42 INIT_LIST_HEAD(&list);
43 flags = IORESOURCE_MEM;
44 ret = acpi_dev_get_resources(adev, &list,
45 acpi_dev_filter_resource_type_cb,
46 (void *) flags);
47 if (ret < 0) {
48 dev_err(dev, "failed to parse _CRS method, error code %d\n",
49 ret);
50 return ret;
51 }
52
53 if (ret == 0) {
54 dev_err(dev, "no IO and memory resources present in _CRS\n");
55 return -EINVAL;
56 }
57
58 entry = list_first_entry(&list, struct resource_entry, node);
59 *res = *entry->res;
60 acpi_dev_free_resource_list(&list);
61 return 0;
62 }
63
64 static acpi_status acpi_match_rc(acpi_handle handle, u32 lvl, void *context,
65 void **retval)
66 {
67 u16 *segment = context;
68 unsigned long long uid;
69 acpi_status status;
70
71 status = acpi_evaluate_integer(handle, METHOD_NAME__UID, NULL, &uid);
72 if (ACPI_FAILURE(status) || uid != *segment)
73 return AE_CTRL_DEPTH;
74
75 *(acpi_handle *)retval = handle;
76 return AE_CTRL_TERMINATE;
77 }
78
79 int acpi_get_rc_resources(struct device *dev, const char *hid, u16 segment,
80 struct resource *res)
81 {
82 struct acpi_device *adev;
83 acpi_status status;
84 acpi_handle handle;
85 int ret;
86
87 status = acpi_get_devices(hid, acpi_match_rc, &segment, &handle);
88 if (ACPI_FAILURE(status)) {
89 dev_err(dev, "can't find _HID %s device to locate resources\n",
90 hid);
91 return -ENODEV;
92 }
93
94 adev = acpi_fetch_acpi_dev(handle);
95 if (!adev)
96 return -ENODEV;
97
98 ret = acpi_get_rc_addr(adev, res);
99 if (ret) {
100 dev_err(dev, "can't get resource from %s\n",
101 dev_name(&adev->dev));
102 return ret;
103 }
104
105 return 0;
106 }
107 #endif
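
/*
 * A minimal usage sketch: an ARM64 ECAM quirk might look up its root
 * complex resources like this (the HID and segment below are
 * hypothetical, chosen only for illustration):
 *
 *	struct resource res;
 *
 *	if (!acpi_get_rc_resources(dev, "VNDR0001", 0, &res))
 *		dev_info(dev, "RC MMIO region: %pR\n", &res);
 */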
108
109 phys_addr_t acpi_pci_root_get_mcfg_addr(acpi_handle handle)
110 {
111 acpi_status status = AE_NOT_EXIST;
112 unsigned long long mcfg_addr;
113
114 if (handle)
115 status = acpi_evaluate_integer(handle, METHOD_NAME__CBA,
116 NULL, &mcfg_addr);
117 if (ACPI_FAILURE(status))
118 return 0;
119
120 return (phys_addr_t)mcfg_addr;
121 }
122
123 bool pci_acpi_preserve_config(struct pci_host_bridge *host_bridge)
124 {
125 bool ret = false;
126
127 if (ACPI_HANDLE(&host_bridge->dev)) {
128 union acpi_object *obj;
129
130 /*
131 * Evaluate the "PCI Boot Configuration" _DSM Function. If it
132 * exists and returns 0, we must preserve any PCI resource
133 * assignments made by firmware for this host bridge.
134 */
135 obj = acpi_evaluate_dsm_typed(ACPI_HANDLE(&host_bridge->dev),
136 &pci_acpi_dsm_guid,
137 1, DSM_PCI_PRESERVE_BOOT_CONFIG,
138 NULL, ACPI_TYPE_INTEGER);
139 if (obj && obj->integer.value == 0)
140 ret = true;
141 ACPI_FREE(obj);
142 }
143
144 return ret;
145 }
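
/*
 * For illustration, firmware that wants its boot-time resource
 * assignments preserved could implement the _DSM roughly as below
 * (hypothetical ASL, not part of this file):
 *
 *	Method (_DSM, 4) {
 *		If (Arg0 == ToUUID ("e5c937d0-3553-4d7a-9117-ea4d19c3434d")) {
 *			If (Arg2 == 5) {	// DSM_PCI_PRESERVE_BOOT_CONFIG
 *				Return (Zero)	// 0: preserve firmware setup
 *			}
 *		}
 *		Return (Buffer () { 0x00 })
 *	}
 */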
146
147 /* _HPX PCI Setting Record (Type 0); same as _HPP */
148 struct hpx_type0 {
149 u32 revision; /* Not present in _HPP */
150 u8 cache_line_size; /* Not applicable to PCIe */
151 u8 latency_timer; /* Not applicable to PCIe */
152 u8 enable_serr;
153 u8 enable_perr;
154 };
155
156 static struct hpx_type0 pci_default_type0 = {
157 .revision = 1,
158 .cache_line_size = 8,
159 .latency_timer = 0x40,
160 .enable_serr = 0,
161 .enable_perr = 0,
162 };
163
164 static void program_hpx_type0(struct pci_dev *dev, struct hpx_type0 *hpx)
165 {
166 u16 pci_cmd, pci_bctl;
167
168 if (!hpx)
169 hpx = &pci_default_type0;
170
171 if (hpx->revision > 1) {
172 pci_warn(dev, "PCI settings rev %d not supported; using defaults\n",
173 hpx->revision);
174 hpx = &pci_default_type0;
175 }
176
177 pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, hpx->cache_line_size);
178 pci_write_config_byte(dev, PCI_LATENCY_TIMER, hpx->latency_timer);
179 pci_read_config_word(dev, PCI_COMMAND, &pci_cmd);
180 if (hpx->enable_serr)
181 pci_cmd |= PCI_COMMAND_SERR;
182 if (hpx->enable_perr)
183 pci_cmd |= PCI_COMMAND_PARITY;
184 pci_write_config_word(dev, PCI_COMMAND, pci_cmd);
185
186 /* Program bridge control value */
187 if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
188 pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER,
189 hpx->latency_timer);
190 pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &pci_bctl);
191 if (hpx->enable_perr)
192 pci_bctl |= PCI_BRIDGE_CTL_PARITY;
193 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, pci_bctl);
194 }
195 }
196
197 static acpi_status decode_type0_hpx_record(union acpi_object *record,
198 struct hpx_type0 *hpx0)
199 {
200 int i;
201 union acpi_object *fields = record->package.elements;
202 u32 revision = fields[1].integer.value;
203
204 switch (revision) {
205 case 1:
206 if (record->package.count != 6)
207 return AE_ERROR;
208 for (i = 2; i < 6; i++)
209 if (fields[i].type != ACPI_TYPE_INTEGER)
210 return AE_ERROR;
211 hpx0->revision = revision;
212 hpx0->cache_line_size = fields[2].integer.value;
213 hpx0->latency_timer = fields[3].integer.value;
214 hpx0->enable_serr = fields[4].integer.value;
215 hpx0->enable_perr = fields[5].integer.value;
216 break;
217 default:
218 pr_warn("%s: Type 0 Revision %d record not supported\n",
219 __func__, revision);
220 return AE_ERROR;
221 }
222 return AE_OK;
223 }
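
/*
 * For reference, a record accepted by the decoder above could look like
 * this in ASL (illustrative values only):
 *
 *	Name (_HPX, Package () {
 *		Package () { 0x00, 0x01, 0x08, 0x40, 0x01, 0x00 }
 *		//	     type  rev   CLS   LT   SERR  PERR
 *	})
 */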
224
225 /* _HPX PCI-X Setting Record (Type 1) */
226 struct hpx_type1 {
227 u32 revision;
228 u8 max_mem_read;
229 u8 avg_max_split;
230 u16 tot_max_split;
231 };
232
233 static void program_hpx_type1(struct pci_dev *dev, struct hpx_type1 *hpx)
234 {
235 int pos;
236
237 if (!hpx)
238 return;
239
240 pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
241 if (!pos)
242 return;
243
244 pci_warn(dev, "PCI-X settings not supported\n");
245 }
246
247 static acpi_status decode_type1_hpx_record(union acpi_object *record,
248 struct hpx_type1 *hpx1)
249 {
250 int i;
251 union acpi_object *fields = record->package.elements;
252 u32 revision = fields[1].integer.value;
253
254 switch (revision) {
255 case 1:
256 if (record->package.count != 5)
257 return AE_ERROR;
258 for (i = 2; i < 5; i++)
259 if (fields[i].type != ACPI_TYPE_INTEGER)
260 return AE_ERROR;
261 hpx1->revision = revision;
262 hpx1->max_mem_read = fields[2].integer.value;
263 hpx1->avg_max_split = fields[3].integer.value;
264 hpx1->tot_max_split = fields[4].integer.value;
265 break;
266 default:
267 pr_warn("%s: Type 1 Revision %d record not supported\n",
268 __func__, revision);
269 return AE_ERROR;
270 }
271 return AE_OK;
272 }
273
274 static bool pcie_root_rcb_set(struct pci_dev *dev)
275 {
276 struct pci_dev *rp = pcie_find_root_port(dev);
277 u16 lnkctl;
278
279 if (!rp)
280 return false;
281
282 pcie_capability_read_word(rp, PCI_EXP_LNKCTL, &lnkctl);
283 if (lnkctl & PCI_EXP_LNKCTL_RCB)
284 return true;
285
286 return false;
287 }
288
289 /* _HPX PCI Express Setting Record (Type 2) */
290 struct hpx_type2 {
291 u32 revision;
292 u32 unc_err_mask_and;
293 u32 unc_err_mask_or;
294 u32 unc_err_sever_and;
295 u32 unc_err_sever_or;
296 u32 cor_err_mask_and;
297 u32 cor_err_mask_or;
298 u32 adv_err_cap_and;
299 u32 adv_err_cap_or;
300 u16 pci_exp_devctl_and;
301 u16 pci_exp_devctl_or;
302 u16 pci_exp_lnkctl_and;
303 u16 pci_exp_lnkctl_or;
304 u32 sec_unc_err_sever_and;
305 u32 sec_unc_err_sever_or;
306 u32 sec_unc_err_mask_and;
307 u32 sec_unc_err_mask_or;
308 };
309
310 static void program_hpx_type2(struct pci_dev *dev, struct hpx_type2 *hpx)
311 {
312 int pos;
313 u32 reg32;
314
315 if (!hpx)
316 return;
317
318 if (!pci_is_pcie(dev))
319 return;
320
321 if (hpx->revision > 1) {
322 pci_warn(dev, "PCIe settings rev %d not supported\n",
323 hpx->revision);
324 return;
325 }
326
327 /*
328 * Don't allow _HPX to change MPS or MRRS settings. We manage
329 * those to make sure they're consistent with the rest of the
330 * platform.
331 */
332 hpx->pci_exp_devctl_and |= PCI_EXP_DEVCTL_PAYLOAD |
333 PCI_EXP_DEVCTL_READRQ;
334 hpx->pci_exp_devctl_or &= ~(PCI_EXP_DEVCTL_PAYLOAD |
335 PCI_EXP_DEVCTL_READRQ);
336
337 /* Initialize Device Control Register */
338 pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
339 ~hpx->pci_exp_devctl_and, hpx->pci_exp_devctl_or);
340
341 /* Initialize Link Control Register */
342 if (pcie_cap_has_lnkctl(dev)) {
343
344 /*
345 * If the Root Port supports Read Completion Boundary of
346 * 128, set RCB to 128. Otherwise, clear it.
347 */
348 hpx->pci_exp_lnkctl_and |= PCI_EXP_LNKCTL_RCB;
349 hpx->pci_exp_lnkctl_or &= ~PCI_EXP_LNKCTL_RCB;
350 if (pcie_root_rcb_set(dev))
351 hpx->pci_exp_lnkctl_or |= PCI_EXP_LNKCTL_RCB;
352
353 pcie_capability_clear_and_set_word(dev, PCI_EXP_LNKCTL,
354 ~hpx->pci_exp_lnkctl_and, hpx->pci_exp_lnkctl_or);
355 }
356
357 /* Find Advanced Error Reporting Enhanced Capability */
358 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
359 if (!pos)
360 return;
361
362 /* Initialize Uncorrectable Error Mask Register */
363 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, &reg32);
364 reg32 = (reg32 & hpx->unc_err_mask_and) | hpx->unc_err_mask_or;
365 pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, reg32);
366
367 /* Initialize Uncorrectable Error Severity Register */
368 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &reg32);
369 reg32 = (reg32 & hpx->unc_err_sever_and) | hpx->unc_err_sever_or;
370 pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, reg32);
371
372 /* Initialize Correctable Error Mask Register */
373 pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, &reg32);
374 reg32 = (reg32 & hpx->cor_err_mask_and) | hpx->cor_err_mask_or;
375 pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, reg32);
376
377 /* Initialize Advanced Error Capabilities and Control Register */
378 pci_read_config_dword(dev, pos + PCI_ERR_CAP, &reg32);
379 reg32 = (reg32 & hpx->adv_err_cap_and) | hpx->adv_err_cap_or;
380
381 /* Don't enable ECRC generation or checking if unsupported */
382 if (!(reg32 & PCI_ERR_CAP_ECRC_GENC))
383 reg32 &= ~PCI_ERR_CAP_ECRC_GENE;
384 if (!(reg32 & PCI_ERR_CAP_ECRC_CHKC))
385 reg32 &= ~PCI_ERR_CAP_ECRC_CHKE;
386 pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32);
387
388 /*
389 * FIXME: The following two registers are not supported yet.
390 *
391 * o Secondary Uncorrectable Error Severity Register
392 * o Secondary Uncorrectable Error Mask Register
393 */
394 }
395
396 static acpi_status decode_type2_hpx_record(union acpi_object *record,
397 struct hpx_type2 *hpx2)
398 {
399 int i;
400 union acpi_object *fields = record->package.elements;
401 u32 revision = fields[1].integer.value;
402
403 switch (revision) {
404 case 1:
405 if (record->package.count != 18)
406 return AE_ERROR;
407 for (i = 2; i < 18; i++)
408 if (fields[i].type != ACPI_TYPE_INTEGER)
409 return AE_ERROR;
410 hpx2->revision = revision;
411 hpx2->unc_err_mask_and = fields[2].integer.value;
412 hpx2->unc_err_mask_or = fields[3].integer.value;
413 hpx2->unc_err_sever_and = fields[4].integer.value;
414 hpx2->unc_err_sever_or = fields[5].integer.value;
415 hpx2->cor_err_mask_and = fields[6].integer.value;
416 hpx2->cor_err_mask_or = fields[7].integer.value;
417 hpx2->adv_err_cap_and = fields[8].integer.value;
418 hpx2->adv_err_cap_or = fields[9].integer.value;
419 hpx2->pci_exp_devctl_and = fields[10].integer.value;
420 hpx2->pci_exp_devctl_or = fields[11].integer.value;
421 hpx2->pci_exp_lnkctl_and = fields[12].integer.value;
422 hpx2->pci_exp_lnkctl_or = fields[13].integer.value;
423 hpx2->sec_unc_err_sever_and = fields[14].integer.value;
424 hpx2->sec_unc_err_sever_or = fields[15].integer.value;
425 hpx2->sec_unc_err_mask_and = fields[16].integer.value;
426 hpx2->sec_unc_err_mask_or = fields[17].integer.value;
427 break;
428 default:
429 pr_warn("%s: Type 2 Revision %d record not supported\n",
430 __func__, revision);
431 return AE_ERROR;
432 }
433 return AE_OK;
434 }
435
436 /* _HPX PCI Express Setting Record (Type 3) */
437 struct hpx_type3 {
438 u16 device_type;
439 u16 function_type;
440 u16 config_space_location;
441 u16 pci_exp_cap_id;
442 u16 pci_exp_cap_ver;
443 u16 pci_exp_vendor_id;
444 u16 dvsec_id;
445 u16 dvsec_rev;
446 u16 match_offset;
447 u32 match_mask_and;
448 u32 match_value;
449 u16 reg_offset;
450 u32 reg_mask_and;
451 u32 reg_mask_or;
452 };
453
454 enum hpx_type3_dev_type {
455 HPX_TYPE_ENDPOINT = BIT(0),
456 HPX_TYPE_LEG_END = BIT(1),
457 HPX_TYPE_RC_END = BIT(2),
458 HPX_TYPE_RC_EC = BIT(3),
459 HPX_TYPE_ROOT_PORT = BIT(4),
460 HPX_TYPE_UPSTREAM = BIT(5),
461 HPX_TYPE_DOWNSTREAM = BIT(6),
462 HPX_TYPE_PCI_BRIDGE = BIT(7),
463 HPX_TYPE_PCIE_BRIDGE = BIT(8),
464 };
465
466 static u16 hpx3_device_type(struct pci_dev *dev)
467 {
468 u16 pcie_type = pci_pcie_type(dev);
469 static const int pcie_to_hpx3_type[] = {
470 [PCI_EXP_TYPE_ENDPOINT] = HPX_TYPE_ENDPOINT,
471 [PCI_EXP_TYPE_LEG_END] = HPX_TYPE_LEG_END,
472 [PCI_EXP_TYPE_RC_END] = HPX_TYPE_RC_END,
473 [PCI_EXP_TYPE_RC_EC] = HPX_TYPE_RC_EC,
474 [PCI_EXP_TYPE_ROOT_PORT] = HPX_TYPE_ROOT_PORT,
475 [PCI_EXP_TYPE_UPSTREAM] = HPX_TYPE_UPSTREAM,
476 [PCI_EXP_TYPE_DOWNSTREAM] = HPX_TYPE_DOWNSTREAM,
477 [PCI_EXP_TYPE_PCI_BRIDGE] = HPX_TYPE_PCI_BRIDGE,
478 [PCI_EXP_TYPE_PCIE_BRIDGE] = HPX_TYPE_PCIE_BRIDGE,
479 };
480
481 if (pcie_type >= ARRAY_SIZE(pcie_to_hpx3_type))
482 return 0;
483
484 return pcie_to_hpx3_type[pcie_type];
485 }
486
487 enum hpx_type3_fn_type {
488 HPX_FN_NORMAL = BIT(0),
489 HPX_FN_SRIOV_PHYS = BIT(1),
490 HPX_FN_SRIOV_VIRT = BIT(2),
491 };
492
493 static u8 hpx3_function_type(struct pci_dev *dev)
494 {
495 if (dev->is_virtfn)
496 return HPX_FN_SRIOV_VIRT;
497 else if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV) > 0)
498 return HPX_FN_SRIOV_PHYS;
499 else
500 return HPX_FN_NORMAL;
501 }
502
503 static bool hpx3_cap_ver_matches(u8 pcie_cap_id, u8 hpx3_cap_id)
504 {
505 u8 cap_ver = hpx3_cap_id & 0xf;
506
507 if ((hpx3_cap_id & BIT(4)) && cap_ver >= pcie_cap_id)
508 return true;
509 else if (cap_ver == pcie_cap_id)
510 return true;
511
512 return false;
513 }
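
/*
 * Worked example (hypothetical values): an _HPX3 version field of 0x12
 * has BIT(4) set and a version nibble of 2, so it matches any device
 * capability whose version does not exceed 2; a field of 0x02 matches
 * version 2 exactly.
 */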
514
515 enum hpx_type3_cfg_loc {
516 HPX_CFG_PCICFG = 0,
517 HPX_CFG_PCIE_CAP = 1,
518 HPX_CFG_PCIE_CAP_EXT = 2,
519 HPX_CFG_VEND_CAP = 3,
520 HPX_CFG_DVSEC = 4,
521 HPX_CFG_MAX,
522 };
523
524 static void program_hpx_type3_register(struct pci_dev *dev,
525 const struct hpx_type3 *reg)
526 {
527 u32 match_reg, write_reg, header, orig_value;
528 u16 pos;
529
530 if (!(hpx3_device_type(dev) & reg->device_type))
531 return;
532
533 if (!(hpx3_function_type(dev) & reg->function_type))
534 return;
535
536 switch (reg->config_space_location) {
537 case HPX_CFG_PCICFG:
538 pos = 0;
539 break;
540 case HPX_CFG_PCIE_CAP:
541 pos = pci_find_capability(dev, reg->pci_exp_cap_id);
542 if (pos == 0)
543 return;
544
545 break;
546 case HPX_CFG_PCIE_CAP_EXT:
547 pos = pci_find_ext_capability(dev, reg->pci_exp_cap_id);
548 if (pos == 0)
549 return;
550
551 pci_read_config_dword(dev, pos, &header);
552 if (!hpx3_cap_ver_matches(PCI_EXT_CAP_VER(header),
553 reg->pci_exp_cap_ver))
554 return;
555
556 break;
557 case HPX_CFG_VEND_CAP:
558 case HPX_CFG_DVSEC:
559 default:
560 pci_warn(dev, "Encountered _HPX type 3 with unsupported config space location");
561 return;
562 }
563
564 pci_read_config_dword(dev, pos + reg->match_offset, &match_reg);
565
566 if ((match_reg & reg->match_mask_and) != reg->match_value)
567 return;
568
569 pci_read_config_dword(dev, pos + reg->reg_offset, &write_reg);
570 orig_value = write_reg;
571 write_reg &= reg->reg_mask_and;
572 write_reg |= reg->reg_mask_or;
573
574 if (orig_value == write_reg)
575 return;
576
577 pci_write_config_dword(dev, pos + reg->reg_offset, write_reg);
578
579 pci_dbg(dev, "Applied _HPX3 at [0x%x]: 0x%08x -> 0x%08x",
580 pos, orig_value, write_reg);
581 }
582
583 static void program_hpx_type3(struct pci_dev *dev, struct hpx_type3 *hpx)
584 {
585 if (!hpx)
586 return;
587
588 if (!pci_is_pcie(dev))
589 return;
590
591 program_hpx_type3_register(dev, hpx);
592 }
593
594 static void parse_hpx3_register(struct hpx_type3 *hpx3_reg,
595 union acpi_object *reg_fields)
596 {
597 hpx3_reg->device_type = reg_fields[0].integer.value;
598 hpx3_reg->function_type = reg_fields[1].integer.value;
599 hpx3_reg->config_space_location = reg_fields[2].integer.value;
600 hpx3_reg->pci_exp_cap_id = reg_fields[3].integer.value;
601 hpx3_reg->pci_exp_cap_ver = reg_fields[4].integer.value;
602 hpx3_reg->pci_exp_vendor_id = reg_fields[5].integer.value;
603 hpx3_reg->dvsec_id = reg_fields[6].integer.value;
604 hpx3_reg->dvsec_rev = reg_fields[7].integer.value;
605 hpx3_reg->match_offset = reg_fields[8].integer.value;
606 hpx3_reg->match_mask_and = reg_fields[9].integer.value;
607 hpx3_reg->match_value = reg_fields[10].integer.value;
608 hpx3_reg->reg_offset = reg_fields[11].integer.value;
609 hpx3_reg->reg_mask_and = reg_fields[12].integer.value;
610 hpx3_reg->reg_mask_or = reg_fields[13].integer.value;
611 }
612
613 static acpi_status program_type3_hpx_record(struct pci_dev *dev,
614 union acpi_object *record)
615 {
616 union acpi_object *fields = record->package.elements;
617 u32 desc_count, expected_length, revision;
618 union acpi_object *reg_fields;
619 struct hpx_type3 hpx3;
620 int i;
621
622 revision = fields[1].integer.value;
623 switch (revision) {
624 case 1:
625 desc_count = fields[2].integer.value;
626 expected_length = 3 + desc_count * 14;
627
628 if (record->package.count != expected_length)
629 return AE_ERROR;
630
631 for (i = 2; i < expected_length; i++)
632 if (fields[i].type != ACPI_TYPE_INTEGER)
633 return AE_ERROR;
634
635 for (i = 0; i < desc_count; i++) {
636 reg_fields = fields + 3 + i * 14;
637 parse_hpx3_register(&hpx3, reg_fields);
638 program_hpx_type3(dev, &hpx3);
639 }
640
641 break;
642 default:
643 pr_warn("%s: Type 3 Revision %d record not supported\n",
644 __func__, revision);
646 return AE_ERROR;
647 }
648 return AE_OK;
649 }
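
/*
 * Shape of a Type 3 record consumed above, sketched in ASL with
 * hypothetical values: a three-integer header (type, revision,
 * descriptor count) followed by 14 integers per register descriptor,
 * in the order read by parse_hpx3_register():
 *
 *	Package () {
 *		0x03, 0x01, 0x01,	// Type 3, Revision 1, one descriptor
 *		0x01, 0x01, 0x02,	// device type, function type, location
 *		0x0001, 0x11, 0x0000,	// cap ID, cap version, vendor ID
 *		0x00, 0x00,		// DVSEC ID, DVSEC revision
 *		0x04, 0xFFFFFFFF, 0x0,	// match offset, AND mask, value
 *		0x08, 0xFFFFFFFF, 0x0	// reg offset, AND mask, OR mask
 *	}
 */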
650
651 static acpi_status acpi_run_hpx(struct pci_dev *dev, acpi_handle handle)
652 {
653 acpi_status status;
654 struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
655 union acpi_object *package, *record, *fields;
656 struct hpx_type0 hpx0;
657 struct hpx_type1 hpx1;
658 struct hpx_type2 hpx2;
659 u32 type;
660 int i;
661
662 status = acpi_evaluate_object(handle, "_HPX", NULL, &buffer);
663 if (ACPI_FAILURE(status))
664 return status;
665
666 package = (union acpi_object *)buffer.pointer;
667 if (package->type != ACPI_TYPE_PACKAGE) {
668 status = AE_ERROR;
669 goto exit;
670 }
671
672 for (i = 0; i < package->package.count; i++) {
673 record = &package->package.elements[i];
674 if (record->type != ACPI_TYPE_PACKAGE) {
675 status = AE_ERROR;
676 goto exit;
677 }
678
679 fields = record->package.elements;
680 if (fields[0].type != ACPI_TYPE_INTEGER ||
681 fields[1].type != ACPI_TYPE_INTEGER) {
682 status = AE_ERROR;
683 goto exit;
684 }
685
686 type = fields[0].integer.value;
687 switch (type) {
688 case 0:
689 memset(&hpx0, 0, sizeof(hpx0));
690 status = decode_type0_hpx_record(record, &hpx0);
691 if (ACPI_FAILURE(status))
692 goto exit;
693 program_hpx_type0(dev, &hpx0);
694 break;
695 case 1:
696 memset(&hpx1, 0, sizeof(hpx1));
697 status = decode_type1_hpx_record(record, &hpx1);
698 if (ACPI_FAILURE(status))
699 goto exit;
700 program_hpx_type1(dev, &hpx1);
701 break;
702 case 2:
703 memset(&hpx2, 0, sizeof(hpx2));
704 status = decode_type2_hpx_record(record, &hpx2);
705 if (ACPI_FAILURE(status))
706 goto exit;
707 program_hpx_type2(dev, &hpx2);
708 break;
709 case 3:
710 status = program_type3_hpx_record(dev, record);
711 if (ACPI_FAILURE(status))
712 goto exit;
713 break;
714 default:
715 pr_err("%s: Type %d record not supported\n",
716 __func__, type);
717 status = AE_ERROR;
718 goto exit;
719 }
720 }
721 exit:
722 kfree(buffer.pointer);
723 return status;
724 }
725
726 static acpi_status acpi_run_hpp(struct pci_dev *dev, acpi_handle handle)
727 {
728 acpi_status status;
729 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
730 union acpi_object *package, *fields;
731 struct hpx_type0 hpx0;
732 int i;
733
734 memset(&hpx0, 0, sizeof(hpx0));
735
736 status = acpi_evaluate_object(handle, "_HPP", NULL, &buffer);
737 if (ACPI_FAILURE(status))
738 return status;
739
740 package = (union acpi_object *) buffer.pointer;
741 if (package->type != ACPI_TYPE_PACKAGE ||
742 package->package.count != 4) {
743 status = AE_ERROR;
744 goto exit;
745 }
746
747 fields = package->package.elements;
748 for (i = 0; i < 4; i++) {
749 if (fields[i].type != ACPI_TYPE_INTEGER) {
750 status = AE_ERROR;
751 goto exit;
752 }
753 }
754
755 hpx0.revision = 1;
756 hpx0.cache_line_size = fields[0].integer.value;
757 hpx0.latency_timer = fields[1].integer.value;
758 hpx0.enable_serr = fields[2].integer.value;
759 hpx0.enable_perr = fields[3].integer.value;
760
761 program_hpx_type0(dev, &hpx0);
762
763 exit:
764 kfree(buffer.pointer);
765 return status;
766 }
767
768 /**
769 * pci_acpi_program_hp_params - program hotplug parameters from ACPI
770 * @dev: the pci_dev for which we want parameters
771 */
772 int pci_acpi_program_hp_params(struct pci_dev *dev)
773 {
774 acpi_status status;
775 acpi_handle handle, phandle;
776 struct pci_bus *pbus;
777
778 if (acpi_pci_disabled)
779 return -ENODEV;
780
781 handle = NULL;
782 for (pbus = dev->bus; pbus; pbus = pbus->parent) {
783 handle = acpi_pci_get_bridge_handle(pbus);
784 if (handle)
785 break;
786 }
787
788 /*
789 * _HPP settings apply to all child buses, until another _HPP is
790 * encountered. If we don't find an _HPP for the input pci dev,
791 * look for it in the parent device scope since that would apply to
792 * this pci dev.
793 */
794 while (handle) {
795 status = acpi_run_hpx(dev, handle);
796 if (ACPI_SUCCESS(status))
797 return 0;
798 status = acpi_run_hpp(dev, handle);
799 if (ACPI_SUCCESS(status))
800 return 0;
801 if (acpi_is_root_bridge(handle))
802 break;
803 status = acpi_get_parent(handle, &phandle);
804 if (ACPI_FAILURE(status))
805 break;
806 handle = phandle;
807 }
808 return -ENODEV;
809 }
810
811 /**
812 * pciehp_is_native - Check whether a hotplug port is handled by the OS
813 * @bridge: Hotplug port to check
814 *
815 * Returns true if the given @bridge is handled by the native PCIe hotplug
816 * driver.
817 */
818 bool pciehp_is_native(struct pci_dev *bridge)
819 {
820 const struct pci_host_bridge *host;
821
822 if (!IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE))
823 return false;
824
825 if (pcie_ports_native)
826 return true;
827
828 host = pci_find_host_bridge(bridge->bus);
829 return host->native_pcie_hotplug;
830 }
831
832 /**
833 * shpchp_is_native - Check whether a hotplug port is handled by the OS
834 * @bridge: Hotplug port to check
835 *
836 * Returns true if the given @bridge is handled by the native SHPC hotplug
837 * driver.
838 */
839 bool shpchp_is_native(struct pci_dev *bridge)
840 {
841 return bridge->shpc_managed;
842 }
843
844 /**
845 * pci_acpi_wake_bus - Root bus wakeup notification work function.
846 * @context: Device wakeup context.
847 */
848 static void pci_acpi_wake_bus(struct acpi_device_wakeup_context *context)
849 {
850 struct acpi_device *adev;
851 struct acpi_pci_root *root;
852
853 adev = container_of(context, struct acpi_device, wakeup.context);
854 root = acpi_driver_data(adev);
855 pci_pme_wakeup_bus(root->bus);
856 }
857
858 /**
859 * pci_acpi_wake_dev - PCI device wakeup notification work function.
860 * @context: Device wakeup context.
861 */
862 static void pci_acpi_wake_dev(struct acpi_device_wakeup_context *context)
863 {
864 struct pci_dev *pci_dev;
865
866 pci_dev = to_pci_dev(context->dev);
867
868 if (pci_dev->pme_poll)
869 pci_dev->pme_poll = false;
870
871 if (pci_dev->current_state == PCI_D3cold) {
872 pci_wakeup_event(pci_dev);
873 pm_request_resume(&pci_dev->dev);
874 return;
875 }
876
877 /* Clear PME Status if set. */
878 if (pci_dev->pme_support)
879 pci_check_pme_status(pci_dev);
880
881 pci_wakeup_event(pci_dev);
882 pm_request_resume(&pci_dev->dev);
883
884 pci_pme_wakeup_bus(pci_dev->subordinate);
885 }
886
887 /**
888 * pci_acpi_add_bus_pm_notifier - Register PM notifier for root PCI bus.
889 * @dev: PCI root bridge ACPI device.
890 */
891 acpi_status pci_acpi_add_bus_pm_notifier(struct acpi_device *dev)
892 {
893 return acpi_add_pm_notifier(dev, NULL, pci_acpi_wake_bus);
894 }
895
896 /**
897 * pci_acpi_add_pm_notifier - Register PM notifier for given PCI device.
898 * @dev: ACPI device to add the notifier for.
899 * @pci_dev: PCI device to check for the PME status if an event is signaled.
900 */
901 acpi_status pci_acpi_add_pm_notifier(struct acpi_device *dev,
902 struct pci_dev *pci_dev)
903 {
904 return acpi_add_pm_notifier(dev, &pci_dev->dev, pci_acpi_wake_dev);
905 }
906
907 /*
908 * _SxD returns the D-state with the highest power
909 * (lowest D-state number) supported in the S-state "x".
910 *
911 * If the device does not have a _PRW
912 * (Power Resources for Wake) supporting system wakeup from "x"
913 * then the OS is free to choose a lower power (higher number
914 * D-state) than the return value from _SxD.
915 *
916 * But if _PRW is enabled at S-state "x", the OS
917 * must not choose a power lower than _SxD --
918 * unless the device has an _SxW method specifying
919 * the lowest power (highest D-state number) the device
920 * may enter while still able to wake the system.
921 *
922 * i.e. depending on global OS policy:
923 *
924 * if (_PRW at S-state x)
925 * choose from highest power _SxD to lowest power _SxW
926 * else // no _PRW at S-state x
927 * choose highest power _SxD or any lower power
928 */
929
930 pci_power_t acpi_pci_choose_state(struct pci_dev *pdev)
931 {
932 int acpi_state, d_max;
933
934 if (pdev->no_d3cold || !pdev->d3cold_allowed)
935 d_max = ACPI_STATE_D3_HOT;
936 else
937 d_max = ACPI_STATE_D3_COLD;
938 acpi_state = acpi_pm_device_sleep_state(&pdev->dev, NULL, d_max);
939 if (acpi_state < 0)
940 return PCI_POWER_ERROR;
941
942 switch (acpi_state) {
943 case ACPI_STATE_D0:
944 return PCI_D0;
945 case ACPI_STATE_D1:
946 return PCI_D1;
947 case ACPI_STATE_D2:
948 return PCI_D2;
949 case ACPI_STATE_D3_HOT:
950 return PCI_D3hot;
951 case ACPI_STATE_D3_COLD:
952 return PCI_D3cold;
953 }
954 return PCI_POWER_ERROR;
955 }
956
957 static struct acpi_device *acpi_pci_find_companion(struct device *dev);
958
959 void pci_set_acpi_fwnode(struct pci_dev *dev)
960 {
961 if (!dev_fwnode(&dev->dev) && !pci_dev_is_added(dev))
962 ACPI_COMPANION_SET(&dev->dev,
963 acpi_pci_find_companion(&dev->dev));
964 }
965
966 /**
967 * pci_dev_acpi_reset - do a function level reset using _RST method
968 * @dev: device to reset
969 * @probe: if true, return 0 if device supports _RST
970 */
971 int pci_dev_acpi_reset(struct pci_dev *dev, bool probe)
972 {
973 acpi_handle handle = ACPI_HANDLE(&dev->dev);
974
975 if (!handle || !acpi_has_method(handle, "_RST"))
976 return -ENOTTY;
977
978 if (probe)
979 return 0;
980
981 if (ACPI_FAILURE(acpi_evaluate_object(handle, "_RST", NULL, NULL))) {
982 pci_warn(dev, "ACPI _RST failed\n");
983 return -ENOTTY;
984 }
985
986 return 0;
987 }
988
989 bool acpi_pci_power_manageable(struct pci_dev *dev)
990 {
991 struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
992
993 return adev && acpi_device_power_manageable(adev);
994 }
995
996 bool acpi_pci_bridge_d3(struct pci_dev *dev)
997 {
998 struct pci_dev *rpdev;
999 struct acpi_device *adev, *rpadev;
1000 const union acpi_object *obj;
1001
1002 if (acpi_pci_disabled || !dev->is_pciehp)
1003 return false;
1004
1005 adev = ACPI_COMPANION(&dev->dev);
1006 if (adev) {
1007 /*
1008 * If the bridge has _S0W, whether or not it can go into D3
1009 * depends on what is returned by that object. In particular,
1010 * if the power state returned by _S0W is D2 or shallower,
1011 * entering D3 should not be allowed.
1012 */
1013 if (acpi_dev_power_state_for_wake(adev) <= ACPI_STATE_D2)
1014 return false;
1015
1016 /*
1017 * Otherwise, assume that the bridge can enter D3 so long as it
1018 * is power-manageable via ACPI.
1019 */
1020 if (acpi_device_power_manageable(adev))
1021 return true;
1022 }
1023
1024 rpdev = pcie_find_root_port(dev);
1025 if (!rpdev)
1026 return false;
1027
1028 if (rpdev == dev)
1029 rpadev = adev;
1030 else
1031 rpadev = ACPI_COMPANION(&rpdev->dev);
1032
1033 if (!rpadev)
1034 return false;
1035
1036 /*
1037 * If the Root Port cannot signal wakeup signals at all, i.e., it
1038 * doesn't supply a wakeup GPE via _PRW, it cannot signal hotplug
1039 * events from low-power states including D3hot and D3cold.
1040 */
1041 if (!rpadev->wakeup.flags.valid)
1042 return false;
1043
1044 /*
1045 * In the bridge-below-a-Root-Port case, evaluate _S0W for the Root Port
1046 * to verify whether or not it can signal wakeup from D3.
1047 */
1048 if (rpadev != adev &&
1049 acpi_dev_power_state_for_wake(rpadev) <= ACPI_STATE_D2)
1050 return false;
1051
1052 /*
1053 * The "HotPlugSupportInD3" property in a Root Port _DSD indicates
1054 * the Port can signal hotplug events while in D3. We assume any
1055 * bridges *below* that Root Port can also signal hotplug events
1056 * while in D3.
1057 */
1058 if (!acpi_dev_get_property(rpadev, "HotPlugSupportInD3",
1059 ACPI_TYPE_INTEGER, &obj) &&
1060 obj->integer.value == 1)
1061 return true;
1062
1063 return false;
1064 }
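
/*
 * The Root Port property checked above would typically come from a _DSD
 * like the following (hypothetical ASL; the UUID is the standard device
 * properties GUID):
 *
 *	Name (_DSD, Package () {
 *		ToUUID ("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"),
 *		Package () {
 *			Package () { "HotPlugSupportInD3", 1 }
 *		}
 *	})
 */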
1065
1066 static void acpi_pci_config_space_access(struct pci_dev *dev, bool enable)
1067 {
1068 int val = enable ? ACPI_REG_CONNECT : ACPI_REG_DISCONNECT;
1069 int ret = acpi_evaluate_reg(ACPI_HANDLE(&dev->dev),
1070 ACPI_ADR_SPACE_PCI_CONFIG, val);
1071 if (ret)
1072 pci_dbg(dev, "ACPI _REG %s evaluation failed (%d)\n",
1073 enable ? "connect" : "disconnect", ret);
1074 }
1075
1076 int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state)
1077 {
1078 struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
1079 static const u8 state_conv[] = {
1080 [PCI_D0] = ACPI_STATE_D0,
1081 [PCI_D1] = ACPI_STATE_D1,
1082 [PCI_D2] = ACPI_STATE_D2,
1083 [PCI_D3hot] = ACPI_STATE_D3_HOT,
1084 [PCI_D3cold] = ACPI_STATE_D3_COLD,
1085 };
1086 int error;
1087
1088 /* If the ACPI device has _EJ0, ignore the device */
1089 if (!adev || acpi_has_method(adev->handle, "_EJ0"))
1090 return -ENODEV;
1091
1092 switch (state) {
1093 case PCI_D0:
1094 case PCI_D1:
1095 case PCI_D2:
1096 case PCI_D3hot:
1097 case PCI_D3cold:
1098 break;
1099 default:
1100 return -EINVAL;
1101 }
1102
1103 if (state == PCI_D3cold) {
1104 if (dev_pm_qos_flags(&dev->dev, PM_QOS_FLAG_NO_POWER_OFF) ==
1105 PM_QOS_FLAGS_ALL)
1106 return -EBUSY;
1107
1108 /* Notify AML lack of PCI config space availability */
1109 acpi_pci_config_space_access(dev, false);
1110 }
1111
1112 error = acpi_device_set_power(adev, state_conv[state]);
1113 if (error)
1114 return error;
1115
1116 pci_dbg(dev, "power state changed by ACPI to %s\n",
1117 acpi_power_state_string(adev->power.state));
1118
1119 /*
1120 * Notify AML of PCI config space availability. Config space is
1121 * accessible in all states except D3cold; the only transitions
1122 * that change availability are transitions to D3cold and from
1123 * D3cold to D0.
1124 */
1125 if (state == PCI_D0)
1126 acpi_pci_config_space_access(dev, true);
1127
1128 return 0;
1129 }
1130
1131 pci_power_t acpi_pci_get_power_state(struct pci_dev *dev)
1132 {
1133 struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
1134 static const pci_power_t state_conv[] = {
1135 [ACPI_STATE_D0] = PCI_D0,
1136 [ACPI_STATE_D1] = PCI_D1,
1137 [ACPI_STATE_D2] = PCI_D2,
1138 [ACPI_STATE_D3_HOT] = PCI_D3hot,
1139 [ACPI_STATE_D3_COLD] = PCI_D3cold,
1140 };
1141 int state;
1142
1143 if (!adev || !acpi_device_power_manageable(adev))
1144 return PCI_UNKNOWN;
1145
1146 state = adev->power.state;
1147 if (state == ACPI_STATE_UNKNOWN)
1148 return PCI_UNKNOWN;
1149
1150 return state_conv[state];
1151 }
1152
1153 void acpi_pci_refresh_power_state(struct pci_dev *dev)
1154 {
1155 struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
1156
1157 if (adev && acpi_device_power_manageable(adev))
1158 acpi_device_update_power(adev, NULL);
1159 }
1160
1161 static int acpi_pci_propagate_wakeup(struct pci_bus *bus, bool enable)
1162 {
1163 while (bus->parent) {
1164 if (acpi_pm_device_can_wakeup(&bus->self->dev))
1165 return acpi_pm_set_device_wakeup(&bus->self->dev, enable);
1166
1167 bus = bus->parent;
1168 }
1169
1170 /* We have reached the root bus. */
1171 if (bus->bridge) {
1172 if (acpi_pm_device_can_wakeup(bus->bridge))
1173 return acpi_pm_set_device_wakeup(bus->bridge, enable);
1174 }
1175 return 0;
1176 }
1177
1178 int acpi_pci_wakeup(struct pci_dev *dev, bool enable)
1179 {
1180 if (acpi_pci_disabled)
1181 return 0;
1182
1183 if (acpi_pm_device_can_wakeup(&dev->dev))
1184 return acpi_pm_set_device_wakeup(&dev->dev, enable);
1185
1186 return acpi_pci_propagate_wakeup(dev->bus, enable);
1187 }
1188
1189 bool acpi_pci_need_resume(struct pci_dev *dev)
1190 {
1191 struct acpi_device *adev;
1192
1193 if (acpi_pci_disabled)
1194 return false;
1195
1196 /*
1197 * In some cases (eg. Samsung 305V4A) leaving a bridge in suspend over
1198 * system-wide suspend/resume confuses the platform firmware, so avoid
1199 * doing that. According to Section 16.1.6 of ACPI 6.2, endpoint
1200 * devices are expected to be in D3 before invoking the S3 entry path
1201 * from the firmware, so they should not be affected by this issue.
1202 */
1203 if (pci_is_bridge(dev) && acpi_target_system_state() != ACPI_STATE_S0)
1204 return true;
1205
1206 adev = ACPI_COMPANION(&dev->dev);
1207 if (!adev || !acpi_device_power_manageable(adev))
1208 return false;
1209
1210 if (adev->wakeup.flags.valid &&
1211 device_may_wakeup(&dev->dev) != !!adev->wakeup.prepare_count)
1212 return true;
1213
1214 if (acpi_target_system_state() == ACPI_STATE_S0)
1215 return false;
1216
1217 return !!adev->power.flags.dsw_present;
1218 }
1219
1220 void acpi_pci_add_bus(struct pci_bus *bus)
1221 {
1222 union acpi_object *obj;
1223 struct pci_host_bridge *bridge;
1224
1225 if (acpi_pci_disabled || !bus->bridge || !ACPI_HANDLE(bus->bridge))
1226 return;
1227
1228 acpi_pci_slot_enumerate(bus);
1229 acpiphp_enumerate_slots(bus);
1230
1231 /*
1232 * For a host bridge, check its _DSM for function 8 and if
1233 * that is available, mark it in pci_host_bridge.
1234 */
1235 if (!pci_is_root_bus(bus))
1236 return;
1237
1238 obj = acpi_evaluate_dsm_typed(ACPI_HANDLE(bus->bridge), &pci_acpi_dsm_guid, 3,
1239 DSM_PCI_POWER_ON_RESET_DELAY, NULL, ACPI_TYPE_INTEGER);
1240 if (!obj)
1241 return;
1242
1243 if (obj->integer.value == 1) {
1244 bridge = pci_find_host_bridge(bus);
1245 bridge->ignore_reset_delay = 1;
1246 }
1247 ACPI_FREE(obj);
1248 }
1249
1250 void acpi_pci_remove_bus(struct pci_bus *bus)
1251 {
1252 if (acpi_pci_disabled || !bus->bridge)
1253 return;
1254
1255 acpiphp_remove_slots(bus);
1256 acpi_pci_slot_remove(bus);
1257 }
1258
1259 /* ACPI bus type */
1260
1261
1262 static DECLARE_RWSEM(pci_acpi_companion_lookup_sem);
1263 static struct acpi_device *(*pci_acpi_find_companion_hook)(struct pci_dev *);
1264
1265 /**
1266 * pci_acpi_set_companion_lookup_hook - Set ACPI companion lookup callback.
1267 * @func: ACPI companion lookup callback pointer or NULL.
1268 *
1269 * Set a special ACPI companion lookup callback for PCI devices whose companion
1270 * objects in the ACPI namespace have _ADR with non-standard bus-device-function
1271 * encodings.
1272 *
1273 * Return 0 on success or a negative error code on failure (in which case no
1274 * changes are made).
1275 *
1276 * The caller is responsible for the appropriate ordering of the invocations of
1277 * this function with respect to the enumeration of the PCI devices needing the
1278 * callback installed by it.
1279 */
1280 int pci_acpi_set_companion_lookup_hook(struct acpi_device *(*func)(struct pci_dev *))
1281 {
1282 int ret;
1283
1284 if (!func)
1285 return -EINVAL;
1286
1287 down_write(&pci_acpi_companion_lookup_sem);
1288
1289 if (pci_acpi_find_companion_hook) {
1290 ret = -EBUSY;
1291 } else {
1292 pci_acpi_find_companion_hook = func;
1293 ret = 0;
1294 }
1295
1296 up_write(&pci_acpi_companion_lookup_sem);
1297
1298 return ret;
1299 }
1300 EXPORT_SYMBOL_GPL(pci_acpi_set_companion_lookup_hook);
1301
1302 /**
1303 * pci_acpi_clear_companion_lookup_hook - Clear ACPI companion lookup callback.
1304 *
1305 * Clear the special ACPI companion lookup callback previously set by
1306 * pci_acpi_set_companion_lookup_hook(). Block until the last running instance
1307 * of the callback returns before clearing it.
1308 *
1309 * The caller is responsible for the appropriate ordering of the invocations of
1310 * this function with respect to the enumeration of the PCI devices needing the
1311 * callback cleared by it.
1312 */
1313 void pci_acpi_clear_companion_lookup_hook(void)
1314 {
1315 down_write(&pci_acpi_companion_lookup_sem);
1316
1317 pci_acpi_find_companion_hook = NULL;
1318
1319 up_write(&pci_acpi_companion_lookup_sem);
1320 }
1321 EXPORT_SYMBOL_GPL(pci_acpi_clear_companion_lookup_hook);
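
/*
 * A minimal sketch of how a platform with non-standard _ADR encodings
 * might use this pair of calls (names below are hypothetical):
 *
 *	static struct acpi_device *my_find_companion(struct pci_dev *pdev)
 *	{
 *		// Map pdev to its ACPI device using platform rules;
 *		// returning NULL falls back to the default _ADR lookup.
 *		return NULL;
 *	}
 *
 *	pci_acpi_set_companion_lookup_hook(my_find_companion);
 *	// ... enumerate the PCI devices that need the hook ...
 *	pci_acpi_clear_companion_lookup_hook();
 */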
1322
1323 static struct acpi_device *acpi_pci_find_companion(struct device *dev)
1324 {
1325 struct pci_dev *pci_dev = to_pci_dev(dev);
1326 struct acpi_device *adev;
1327 bool check_children;
1328 u64 addr;
1329
1330 if (!dev->parent)
1331 return NULL;
1332
1333 down_read(&pci_acpi_companion_lookup_sem);
1334
1335 adev = pci_acpi_find_companion_hook ?
1336 pci_acpi_find_companion_hook(pci_dev) : NULL;
1337
1338 up_read(&pci_acpi_companion_lookup_sem);
1339
1340 if (adev)
1341 return adev;
1342
1343 check_children = pci_is_bridge(pci_dev);
1344 /* See the ACPI spec for the _ADR encoding: device in bits 31:16, function in bits 15:0 */
1345 addr = (PCI_SLOT(pci_dev->devfn) << 16) | PCI_FUNC(pci_dev->devfn);
1346 adev = acpi_find_child_device(ACPI_COMPANION(dev->parent), addr,
1347 check_children);
1348
1349 /*
1350 * There may be ACPI device objects in the ACPI namespace that are
1351 * children of the device object representing the host bridge, but don't
1352 * represent PCI devices. Both _HID and _ADR may be present for them,
1353 * even though that is against the specification (for example, see
1354 * Section 6.1 of ACPI 6.3), but in many cases the _ADR returns 0 which
1355 * appears to indicate that they should not be taken into consideration
1356 * as potential companions of PCI devices on the root bus.
1357 *
1358 * To catch this special case, disregard the returned device object if
1359 * it has a valid _HID, addr is 0 and the PCI device at hand is on the
1360 * root bus.
1361 */
1362 if (adev && adev->pnp.type.platform_id && !addr &&
1363 pci_is_root_bus(pci_dev->bus))
1364 return NULL;
1365
1366 return adev;
1367 }
1368
1369 /**
1370 * pci_acpi_optimize_delay - optimize PCI D3 and D3cold delay from ACPI
1371 * @pdev: the PCI device whose delay is to be updated
1372 * @handle: ACPI handle of this device
1373 *
1374 * Update the d3hot_delay and d3cold_delay of a PCI device from the ACPI _DSM
1375 * control method of either the device itself or the PCI host bridge.
1376 *
1377 * Function 8, "Reset Delay," applies to the entire hierarchy below a PCI
1378 * host bridge. If it returns one, the OS may assume that all devices in
1379 * the hierarchy have already completed power-on reset delays.
1380 *
1381 * Function 9, "Device Readiness Durations," applies only to the object
1382 * where it is located. It returns delay durations required after various
1383 * events if the device requires less time than the spec requires. Delays
1384 * from this function take precedence over the Reset Delay function.
1385 *
1386 * These _DSM functions are defined by the draft ECN of January 28, 2014,
1387 * titled "ACPI additions for FW latency optimizations."
1388 */
1389 static void pci_acpi_optimize_delay(struct pci_dev *pdev,
1390 acpi_handle handle)
1391 {
1392 struct pci_host_bridge *bridge = pci_find_host_bridge(pdev->bus);
1393 int value;
1394 union acpi_object *obj, *elements;
1395
1396 if (bridge->ignore_reset_delay)
1397 pdev->d3cold_delay = 0;
1398
1399 obj = acpi_evaluate_dsm_typed(handle, &pci_acpi_dsm_guid, 3,
1400 DSM_PCI_DEVICE_READINESS_DURATIONS, NULL,
1401 ACPI_TYPE_PACKAGE);
1402 if (!obj)
1403 return;
1404
1405 if (obj->package.count == 5) {
1406 elements = obj->package.elements;
1407 if (elements[0].type == ACPI_TYPE_INTEGER) {
1408 value = (int)elements[0].integer.value / 1000;
1409 if (value < PCI_PM_D3COLD_WAIT)
1410 pdev->d3cold_delay = value;
1411 }
1412 if (elements[3].type == ACPI_TYPE_INTEGER) {
1413 value = (int)elements[3].integer.value / 1000;
1414 if (value < PCI_PM_D3HOT_WAIT)
1415 pdev->d3hot_delay = value;
1416 }
1417 }
1418 ACPI_FREE(obj);
1419 }
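
/*
 * Example (hypothetical durations, in microseconds): a Function 9
 * package of { 50000, 0, 0, 5000, 0 } would lower d3cold_delay to 50 ms
 * and d3hot_delay to 5 ms, since both are below the spec defaults
 * (PCI_PM_D3COLD_WAIT and PCI_PM_D3HOT_WAIT).
 */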
1420
1421 static void pci_acpi_set_external_facing(struct pci_dev *dev)
1422 {
1423 u8 val;
1424
1425 if (pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
1426 return;
1427 if (device_property_read_u8(&dev->dev, "ExternalFacingPort", &val))
1428 return;
1429
1430 /*
1431 * These root ports expose PCIe (including DMA) outside of the
1432 * system. Everything downstream from them is external.
1433 */
1434 if (val)
1435 dev->external_facing = 1;
1436 }
1437
1438 void pci_acpi_setup(struct device *dev, struct acpi_device *adev)
1439 {
1440 struct pci_dev *pci_dev = to_pci_dev(dev);
1441
1442 pci_acpi_optimize_delay(pci_dev, adev->handle);
1443 pci_acpi_set_external_facing(pci_dev);
1444 pci_acpi_add_edr_notifier(pci_dev);
1445
1446 pci_acpi_add_pm_notifier(adev, pci_dev);
1447 if (!adev->wakeup.flags.valid)
1448 return;
1449
1450 device_set_wakeup_capable(dev, true);
1451 /*
1452 * For bridges that can do D3 we enable wake automatically (as
1453 * we do for the power management itself in that case). The
1454 * reason is that the bridge may have additional methods such as
1455 * _DSW that need to be called.
1456 */
1457 if (pci_dev->bridge_d3)
1458 device_wakeup_enable(dev);
1459
1460 acpi_pci_wakeup(pci_dev, false);
1461 acpi_device_power_add_dependent(adev, dev);
1462
1463 if (pci_is_bridge(pci_dev))
1464 acpi_dev_power_up_children_with_adr(adev);
1465 }
1466
1467 void pci_acpi_cleanup(struct device *dev, struct acpi_device *adev)
1468 {
1469 struct pci_dev *pci_dev = to_pci_dev(dev);
1470
1471 pci_acpi_remove_edr_notifier(pci_dev);
1472 pci_acpi_remove_pm_notifier(adev);
1473 if (adev->wakeup.flags.valid) {
1474 acpi_device_power_remove_dependent(adev, dev);
1475 if (pci_dev->bridge_d3)
1476 device_wakeup_disable(dev);
1477
1478 device_set_wakeup_capable(dev, false);
1479 }
1480 }
1481
1482 static struct fwnode_handle *(*pci_msi_get_fwnode_cb)(struct device *dev);
1483
1484 /**
1485 * pci_msi_register_fwnode_provider - Register callback to retrieve fwnode
1486 * @fn: Callback matching a device to a fwnode that identifies a PCI
1487 * MSI domain.
1488 *
1489 * This should be called by the irqchip driver that parents the MSI
1490 * domain, to provide a callback for querying the fwnode.
1491 */
1492 void
1493 pci_msi_register_fwnode_provider(struct fwnode_handle *(*fn)(struct device *))
1494 {
1495 pci_msi_get_fwnode_cb = fn;
1496 }
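
/*
 * A minimal sketch of an irqchip driver registering its provider (the
 * fwnode variable is hypothetical):
 *
 *	static struct fwnode_handle *my_msi_fwnode(struct device *dev)
 *	{
 *		return my_msi_domain_fwnode;	// backs the MSI irq_domain
 *	}
 *
 *	pci_msi_register_fwnode_provider(my_msi_fwnode);
 */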
1497
1498 /**
1499 * pci_host_bridge_acpi_msi_domain - Retrieve MSI domain of a PCI host bridge
1500 * @bus: The PCI host bridge bus.
1501 *
1502 * This function uses the callback function registered by
1503 * pci_msi_register_fwnode_provider() to retrieve the irq_domain with
1504 * type DOMAIN_BUS_PCI_MSI of the specified host bridge bus.
1505 * This returns NULL on error or when the domain is not found.
1506 */
1507 struct irq_domain *pci_host_bridge_acpi_msi_domain(struct pci_bus *bus)
1508 {
1509 struct fwnode_handle *fwnode;
1510
1511 if (!pci_msi_get_fwnode_cb)
1512 return NULL;
1513
1514 fwnode = pci_msi_get_fwnode_cb(&bus->dev);
1515 if (!fwnode)
1516 return NULL;
1517
1518 return irq_find_matching_fwnode(fwnode, DOMAIN_BUS_PCI_MSI);
1519 }
1520
1521 static int __init acpi_pci_init(void)
1522 {
1523 if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_MSI) {
1524 pr_info("ACPI FADT declares the system doesn't support MSI, so disable it\n");
1525 pci_no_msi();
1526 }
1527
1528 if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_ASPM) {
1529 pr_info("ACPI FADT declares the system doesn't support PCIe ASPM, so disable it\n");
1530 pcie_no_aspm();
1531 }
1532
1533 if (acpi_pci_disabled)
1534 return 0;
1535
1536 acpi_pci_slot_init();
1537 acpiphp_init();
1538
1539 return 0;
1540 }
1541 arch_initcall(acpi_pci_init);
1542
1543 #if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
1544
1545 /*
1546 * Try to assign the IRQ number when probing a new device
1547 */
1548 int pcibios_alloc_irq(struct pci_dev *dev)
1549 {
1550 if (!acpi_disabled)
1551 acpi_pci_irq_enable(dev);
1552
1553 return 0;
1554 }
1555
1556 struct acpi_pci_generic_root_info {
1557 struct acpi_pci_root_info common;
1558 struct pci_config_window *cfg; /* config space mapping */
1559 };
1560
1561 int acpi_pci_bus_find_domain_nr(struct pci_bus *bus)
1562 {
1563 struct pci_config_window *cfg = bus->sysdata;
1564 struct acpi_device *adev = to_acpi_device(cfg->parent);
1565 struct acpi_pci_root *root = acpi_driver_data(adev);
1566
1567 return root->segment;
1568 }
1569
1570 int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
1571 {
1572 struct pci_config_window *cfg;
1573 struct acpi_device *adev;
1574 struct device *bus_dev;
1575
1576 if (acpi_disabled)
1577 return 0;
1578
1579 cfg = bridge->bus->sysdata;
1580
1581 /*
1582 * On Hyper-V there is no corresponding ACPI device for a root bridge,
1583 * so the driver sets ->parent to NULL. Leave 'adev' NULL in that
1584 * case because there is no proper ACPI device.
1585 */
1586 if (!cfg->parent)
1587 adev = NULL;
1588 else
1589 adev = to_acpi_device(cfg->parent);
1590
1591 bus_dev = &bridge->bus->dev;
1592
1593 ACPI_COMPANION_SET(&bridge->dev, adev);
1594 set_dev_node(bus_dev, acpi_get_node(acpi_device_handle(adev)));
1595
1596 return 0;
1597 }
1598
1599 static int pci_acpi_root_prepare_resources(struct acpi_pci_root_info *ci)
1600 {
1601 struct resource_entry *entry, *tmp;
1602 int status;
1603
1604 status = acpi_pci_probe_root_resources(ci);
1605 resource_list_for_each_entry_safe(entry, tmp, &ci->resources) {
1606 if (!(entry->res->flags & IORESOURCE_WINDOW))
1607 resource_list_destroy_entry(entry);
1608 }
1609 return status;
1610 }
1611
1612 /*
1613 * Lookup the bus range for the domain in MCFG, and set up config space
1614 * mapping.
1615 */
1616 static struct pci_config_window *
1617 pci_acpi_setup_ecam_mapping(struct acpi_pci_root *root)
1618 {
1619 struct device *dev = &root->device->dev;
1620 struct resource *bus_res = &root->secondary;
1621 u16 seg = root->segment;
1622 const struct pci_ecam_ops *ecam_ops;
1623 struct resource cfgres;
1624 struct acpi_device *adev;
1625 struct pci_config_window *cfg;
1626 int ret;
1627
1628 ret = pci_mcfg_lookup(root, &cfgres, &ecam_ops);
1629 if (ret) {
1630 dev_err(dev, "%04x:%pR ECAM region not found\n", seg, bus_res);
1631 return NULL;
1632 }
1633
1634 adev = acpi_resource_consumer(&cfgres);
1635 if (adev)
1636 dev_info(dev, "ECAM area %pR reserved by %s\n", &cfgres,
1637 dev_name(&adev->dev));
1638 else
1639 dev_warn(dev, FW_BUG "ECAM area %pR not reserved in ACPI namespace\n",
1640 &cfgres);
1641
1642 cfg = pci_ecam_create(dev, &cfgres, bus_res, ecam_ops);
1643 if (IS_ERR(cfg)) {
1644 dev_err(dev, "%04x:%pR error %ld mapping ECAM\n", seg, bus_res,
1645 PTR_ERR(cfg));
1646 return NULL;
1647 }
1648
1649 return cfg;
1650 }
1651
1652 /* release_info: free resources allocated by init_info */
1653 static void pci_acpi_generic_release_info(struct acpi_pci_root_info *ci)
1654 {
1655 struct acpi_pci_generic_root_info *ri;
1656
1657 ri = container_of(ci, struct acpi_pci_generic_root_info, common);
1658 pci_ecam_free(ri->cfg);
1659 kfree(ci->ops);
1660 kfree(ri);
1661 }
1662
1663 /* Interface called from ACPI code to setup PCI host controller */
1664 struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
1665 {
1666 struct acpi_pci_generic_root_info *ri;
1667 struct pci_bus *bus, *child;
1668 struct acpi_pci_root_ops *root_ops;
1669 struct pci_host_bridge *host;
1670
1671 ri = kzalloc(sizeof(*ri), GFP_KERNEL);
1672 if (!ri)
1673 return NULL;
1674
1675 root_ops = kzalloc(sizeof(*root_ops), GFP_KERNEL);
1676 if (!root_ops) {
1677 kfree(ri);
1678 return NULL;
1679 }
1680
1681 ri->cfg = pci_acpi_setup_ecam_mapping(root);
1682 if (!ri->cfg) {
1683 kfree(ri);
1684 kfree(root_ops);
1685 return NULL;
1686 }
1687
1688 root_ops->release_info = pci_acpi_generic_release_info;
1689 root_ops->prepare_resources = pci_acpi_root_prepare_resources;
1690 root_ops->pci_ops = (struct pci_ops *)&ri->cfg->ops->pci_ops;
1691 bus = acpi_pci_root_create(root, root_ops, &ri->common, ri->cfg);
1692 if (!bus)
1693 return NULL;
1694
1695 /* If we must preserve the resource configuration, claim now */
1696 host = pci_find_host_bridge(bus);
1697 if (host->preserve_config)
1698 pci_bus_claim_resources(bus);
1699
1700 /*
1701 * Assign whatever was left unassigned. If we didn't claim above,
1702 * this will reassign everything.
1703 */
1704 pci_assign_unassigned_root_bus_resources(bus);
1705
1706 list_for_each_entry(child, &bus->children, node)
1707 pcie_bus_configure_settings(child);
1708
1709 return bus;
1710 }
1711
1712 void pcibios_add_bus(struct pci_bus *bus)
1713 {
1714 acpi_pci_add_bus(bus);
1715 }
1716
1717 void pcibios_remove_bus(struct pci_bus *bus)
1718 {
1719 acpi_pci_remove_bus(bus);
1720 }
1721
1722 #endif
1723