// SPDX-License-Identifier: GPL-2.0
/*
 * PCI support in ACPI
 *
 * Copyright (C) 2005 David Shaohua Li <shaohua.li@intel.com>
 * Copyright (C) 2004 Tom Long Nguyen <tom.l.nguyen@intel.com>
 * Copyright (C) 2004 Intel Corp.
 */

#include <linux/delay.h>
#include <linux/init.h>
#include <linux/irqdomain.h>
#include <linux/pci.h>
#include <linux/msi.h>
#include <linux/pci_hotplug.h>
#include <linux/module.h>
#include <linux/pci-acpi.h>
#include <linux/pm_runtime.h>
#include <linux/pm_qos.h>
#include <linux/rwsem.h>
#include "pci.h"

/*
 * The GUID is defined in the PCI Firmware Specification, available to
 * PCI-SIG members at:
 * https://members.pcisig.com/wg/PCI-SIG/document/15350
 */
const guid_t pci_acpi_dsm_guid =
        GUID_INIT(0xe5c937d0, 0x3553, 0x4d7a,
                  0x91, 0x17, 0xea, 0x4d, 0x19, 0xc3, 0x43, 0x4d);

#if defined(CONFIG_PCI_QUIRKS) && defined(CONFIG_ARM64)
static int acpi_get_rc_addr(struct acpi_device *adev, struct resource *res)
{
        struct device *dev = &adev->dev;
        struct resource_entry *entry;
        struct list_head list;
        unsigned long flags;
        int ret;

        INIT_LIST_HEAD(&list);
        flags = IORESOURCE_MEM;
        ret = acpi_dev_get_resources(adev, &list,
                                     acpi_dev_filter_resource_type_cb,
                                     (void *) flags);
        if (ret < 0) {
                dev_err(dev, "failed to parse _CRS method, error code %d\n",
                        ret);
                return ret;
        }

        if (ret == 0) {
                dev_err(dev, "no IO and memory resources present in _CRS\n");
                return -EINVAL;
        }

        entry = list_first_entry(&list, struct resource_entry, node);
        *res = *entry->res;
        acpi_dev_free_resource_list(&list);
        return 0;
}

static acpi_status acpi_match_rc(acpi_handle handle, u32 lvl, void *context,
                                 void **retval)
{
        u16 *segment = context;
        unsigned long long uid;
        acpi_status status;

        status = acpi_evaluate_integer(handle, METHOD_NAME__UID, NULL, &uid);
        if (ACPI_FAILURE(status) || uid != *segment)
                return AE_CTRL_DEPTH;

        *(acpi_handle *)retval = handle;
        return AE_CTRL_TERMINATE;
}

int acpi_get_rc_resources(struct device *dev, const char *hid, u16 segment,
                          struct resource *res)
{
        struct acpi_device *adev;
        acpi_status status;
        acpi_handle handle;
        int ret;

        status = acpi_get_devices(hid, acpi_match_rc, &segment, &handle);
        if (ACPI_FAILURE(status)) {
                dev_err(dev, "can't find _HID %s device to locate resources\n",
                        hid);
                return -ENODEV;
        }

        adev = acpi_fetch_acpi_dev(handle);
        if (!adev)
                return -ENODEV;

        ret = acpi_get_rc_addr(adev, res);
        if (ret) {
                dev_err(dev, "can't get resource from %s\n",
                        dev_name(&adev->dev));
                return ret;
        }

        return 0;
}
#endif
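
/*
 * Illustrative caller sketch (not compiled here): a host-bridge quirk that
 * needs the RC register window would typically call acpi_get_rc_resources()
 * with the vendor _HID and the segment of the bridge it is setting up.
 * "VNDR0001" below is a made-up placeholder _HID:
 *
 *	struct resource res;
 *	int ret = acpi_get_rc_resources(dev, "VNDR0001", segment, &res);
 *	if (ret)
 *		return ret;	// no matching _HID/_UID, or empty _CRS
 *	// res now holds the first memory resource from the device's _CRS
 */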

phys_addr_t acpi_pci_root_get_mcfg_addr(acpi_handle handle)
{
        acpi_status status = AE_NOT_EXIST;
        unsigned long long mcfg_addr;

        if (handle)
                status = acpi_evaluate_integer(handle, METHOD_NAME__CBA,
                                               NULL, &mcfg_addr);
        if (ACPI_FAILURE(status))
                return 0;

        return (phys_addr_t)mcfg_addr;
}

bool pci_acpi_preserve_config(struct pci_host_bridge *host_bridge)
{
        bool ret = false;

        if (ACPI_HANDLE(&host_bridge->dev)) {
                union acpi_object *obj;

                /*
                 * Evaluate the "PCI Boot Configuration" _DSM Function.  If it
                 * exists and returns 0, we must preserve any PCI resource
                 * assignments made by firmware for this host bridge.
                 */
                obj = acpi_evaluate_dsm_typed(ACPI_HANDLE(&host_bridge->dev),
                                              &pci_acpi_dsm_guid,
                                              1, DSM_PCI_PRESERVE_BOOT_CONFIG,
                                              NULL, ACPI_TYPE_INTEGER);
                if (obj && obj->integer.value == 0)
                        ret = true;

                /* Free the object on all paths, not only when we return false */
                ACPI_FREE(obj);
        }

        return ret;
}
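
/*
 * Illustrative ASL sketch (assumed firmware side, not part of this file):
 * returning 0 from the "Preserve Boot Configuration" _DSM function tells the
 * OS to keep the firmware-assigned resources below this host bridge.  The
 * function index 0x05 corresponds to DSM_PCI_PRESERVE_BOOT_CONFIG:
 *
 *	Method (_DSM, 4) {
 *		If (Arg0 == ToUUID ("e5c937d0-3553-4d7a-9117-ea4d19c3434d")) {
 *			If (Arg2 == 0x05) {	// Preserve Boot Configuration
 *				Return (0)	// 0 == preserve
 *			}
 *		}
 *		Return (Buffer () {0})
 *	}
 */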

/* _HPX PCI Setting Record (Type 0); same as _HPP */
struct hpx_type0 {
        u32 revision;           /* Not present in _HPP */
        u8  cache_line_size;    /* Not applicable to PCIe */
        u8  latency_timer;      /* Not applicable to PCIe */
        u8  enable_serr;
        u8  enable_perr;
};

static struct hpx_type0 pci_default_type0 = {
        .revision = 1,
        .cache_line_size = 8,
        .latency_timer = 0x40,
        .enable_serr = 0,
        .enable_perr = 0,
};

static void program_hpx_type0(struct pci_dev *dev, struct hpx_type0 *hpx)
{
        u16 pci_cmd, pci_bctl;

        if (!hpx)
                hpx = &pci_default_type0;

        if (hpx->revision > 1) {
                pci_warn(dev, "PCI settings rev %d not supported; using defaults\n",
                         hpx->revision);
                hpx = &pci_default_type0;
        }

        pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, hpx->cache_line_size);
        pci_write_config_byte(dev, PCI_LATENCY_TIMER, hpx->latency_timer);
        pci_read_config_word(dev, PCI_COMMAND, &pci_cmd);
        if (hpx->enable_serr)
                pci_cmd |= PCI_COMMAND_SERR;
        if (hpx->enable_perr)
                pci_cmd |= PCI_COMMAND_PARITY;
        pci_write_config_word(dev, PCI_COMMAND, pci_cmd);

        /* Program bridge control value */
        if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
                pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER,
                                      hpx->latency_timer);
                pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &pci_bctl);
                if (hpx->enable_perr)
                        pci_bctl |= PCI_BRIDGE_CTL_PARITY;
                pci_write_config_word(dev, PCI_BRIDGE_CONTROL, pci_bctl);
        }
}

static acpi_status decode_type0_hpx_record(union acpi_object *record,
                                           struct hpx_type0 *hpx0)
{
        int i;
        union acpi_object *fields = record->package.elements;
        u32 revision = fields[1].integer.value;

        switch (revision) {
        case 1:
                if (record->package.count != 6)
                        return AE_ERROR;
                for (i = 2; i < 6; i++)
                        if (fields[i].type != ACPI_TYPE_INTEGER)
                                return AE_ERROR;
                hpx0->revision        = revision;
                hpx0->cache_line_size = fields[2].integer.value;
                hpx0->latency_timer   = fields[3].integer.value;
                hpx0->enable_serr     = fields[4].integer.value;
                hpx0->enable_perr     = fields[5].integer.value;
                break;
        default:
                pr_warn("%s: Type 0 Revision %d record not supported\n",
                        __func__, revision);
                return AE_ERROR;
        }
        return AE_OK;
}
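
/*
 * Illustrative ASL sketch (assumed firmware side): a Type 0 _HPX record is
 * the six-element package decode_type0_hpx_record() above accepts --
 * {type, revision, cache line size, latency timer, enable SERR, enable PERR}:
 *
 *	Name (_HPX, Package () {
 *		Package () { 0x00, 0x01, 0x08, 0x40, 0x01, 0x00 }
 *	})
 *
 * This would program a cache line size of 8 DWORDs, a latency timer of 0x40,
 * and enable SERR# reporting -- the same as pci_default_type0 except for SERR.
 */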

/* _HPX PCI-X Setting Record (Type 1) */
struct hpx_type1 {
        u32 revision;
        u8  max_mem_read;
        u8  avg_max_split;
        u16 tot_max_split;
};

static void program_hpx_type1(struct pci_dev *dev, struct hpx_type1 *hpx)
{
        int pos;

        if (!hpx)
                return;

        pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
        if (!pos)
                return;

        pci_warn(dev, "PCI-X settings not supported\n");
}

static acpi_status decode_type1_hpx_record(union acpi_object *record,
                                           struct hpx_type1 *hpx1)
{
        int i;
        union acpi_object *fields = record->package.elements;
        u32 revision = fields[1].integer.value;

        switch (revision) {
        case 1:
                if (record->package.count != 5)
                        return AE_ERROR;
                for (i = 2; i < 5; i++)
                        if (fields[i].type != ACPI_TYPE_INTEGER)
                                return AE_ERROR;
                hpx1->revision      = revision;
                hpx1->max_mem_read  = fields[2].integer.value;
                hpx1->avg_max_split = fields[3].integer.value;
                hpx1->tot_max_split = fields[4].integer.value;
                break;
        default:
                pr_warn("%s: Type 1 Revision %d record not supported\n",
                        __func__, revision);
                return AE_ERROR;
        }
        return AE_OK;
}

static bool pcie_root_rcb_set(struct pci_dev *dev)
{
        struct pci_dev *rp = pcie_find_root_port(dev);
        u16 lnkctl;

        if (!rp)
                return false;

        pcie_capability_read_word(rp, PCI_EXP_LNKCTL, &lnkctl);
        if (lnkctl & PCI_EXP_LNKCTL_RCB)
                return true;

        return false;
}

/* _HPX PCI Express Setting Record (Type 2) */
struct hpx_type2 {
        u32 revision;
        u32 unc_err_mask_and;
        u32 unc_err_mask_or;
        u32 unc_err_sever_and;
        u32 unc_err_sever_or;
        u32 cor_err_mask_and;
        u32 cor_err_mask_or;
        u32 adv_err_cap_and;
        u32 adv_err_cap_or;
        u16 pci_exp_devctl_and;
        u16 pci_exp_devctl_or;
        u16 pci_exp_lnkctl_and;
        u16 pci_exp_lnkctl_or;
        u32 sec_unc_err_sever_and;
        u32 sec_unc_err_sever_or;
        u32 sec_unc_err_mask_and;
        u32 sec_unc_err_mask_or;
};

static void program_hpx_type2(struct pci_dev *dev, struct hpx_type2 *hpx)
{
        int pos;
        u32 reg32;

        if (!hpx)
                return;

        if (!pci_is_pcie(dev))
                return;

        if (hpx->revision > 1) {
                pci_warn(dev, "PCIe settings rev %d not supported\n",
                         hpx->revision);
                return;
        }

        /*
         * Don't allow _HPX to change MPS or MRRS settings.  We manage
         * those to make sure they're consistent with the rest of the
         * platform.
         */
        hpx->pci_exp_devctl_and |= PCI_EXP_DEVCTL_PAYLOAD |
                                   PCI_EXP_DEVCTL_READRQ;
        hpx->pci_exp_devctl_or  &= ~(PCI_EXP_DEVCTL_PAYLOAD |
                                     PCI_EXP_DEVCTL_READRQ);

        /* Initialize Device Control Register */
        pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
                        ~hpx->pci_exp_devctl_and, hpx->pci_exp_devctl_or);

        /* Initialize Link Control Register */
        if (pcie_cap_has_lnkctl(dev)) {

                /*
                 * If the Root Port supports Read Completion Boundary of
                 * 128, set RCB to 128.  Otherwise, clear it.
                 */
                hpx->pci_exp_lnkctl_and |= PCI_EXP_LNKCTL_RCB;
                hpx->pci_exp_lnkctl_or  &= ~PCI_EXP_LNKCTL_RCB;
                if (pcie_root_rcb_set(dev))
                        hpx->pci_exp_lnkctl_or |= PCI_EXP_LNKCTL_RCB;

                pcie_capability_clear_and_set_word(dev, PCI_EXP_LNKCTL,
                        ~hpx->pci_exp_lnkctl_and, hpx->pci_exp_lnkctl_or);
        }

        /* Find Advanced Error Reporting Enhanced Capability */
        pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
        if (!pos)
                return;

        /* Initialize Uncorrectable Error Mask Register */
        pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, &reg32);
        reg32 = (reg32 & hpx->unc_err_mask_and) | hpx->unc_err_mask_or;
        pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, reg32);

        /* Initialize Uncorrectable Error Severity Register */
        pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &reg32);
        reg32 = (reg32 & hpx->unc_err_sever_and) | hpx->unc_err_sever_or;
        pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, reg32);

        /* Initialize Correctable Error Mask Register */
        pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, &reg32);
        reg32 = (reg32 & hpx->cor_err_mask_and) | hpx->cor_err_mask_or;
        pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, reg32);

        /* Initialize Advanced Error Capabilities and Control Register */
        pci_read_config_dword(dev, pos + PCI_ERR_CAP, &reg32);
        reg32 = (reg32 & hpx->adv_err_cap_and) | hpx->adv_err_cap_or;

        /* Don't enable ECRC generation or checking if unsupported */
        if (!(reg32 & PCI_ERR_CAP_ECRC_GENC))
                reg32 &= ~PCI_ERR_CAP_ECRC_GENE;
        if (!(reg32 & PCI_ERR_CAP_ECRC_CHKC))
                reg32 &= ~PCI_ERR_CAP_ECRC_CHKE;
        pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32);

        /*
         * FIXME: The following two registers are not supported yet.
         *
         *   o Secondary Uncorrectable Error Severity Register
         *   o Secondary Uncorrectable Error Mask Register
         */
}
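
/*
 * Worked example of the AND/OR mask scheme used by Type 2 records (values
 * are made up): with unc_err_mask_and = 0xffffffef and unc_err_mask_or =
 * 0x00000020, a current Uncorrectable Error Mask of 0x00000110 becomes
 *
 *	(0x00000110 & 0xffffffef) | 0x00000020 = 0x00000120
 *
 * i.e. the AND mask clears bit 4 and the OR mask sets bit 5, leaving all
 * other bits untouched.
 */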

static acpi_status decode_type2_hpx_record(union acpi_object *record,
                                           struct hpx_type2 *hpx2)
{
        int i;
        union acpi_object *fields = record->package.elements;
        u32 revision = fields[1].integer.value;

        switch (revision) {
        case 1:
                if (record->package.count != 18)
                        return AE_ERROR;
                for (i = 2; i < 18; i++)
                        if (fields[i].type != ACPI_TYPE_INTEGER)
                                return AE_ERROR;
                hpx2->revision              = revision;
                hpx2->unc_err_mask_and      = fields[2].integer.value;
                hpx2->unc_err_mask_or       = fields[3].integer.value;
                hpx2->unc_err_sever_and     = fields[4].integer.value;
                hpx2->unc_err_sever_or      = fields[5].integer.value;
                hpx2->cor_err_mask_and      = fields[6].integer.value;
                hpx2->cor_err_mask_or       = fields[7].integer.value;
                hpx2->adv_err_cap_and       = fields[8].integer.value;
                hpx2->adv_err_cap_or        = fields[9].integer.value;
                hpx2->pci_exp_devctl_and    = fields[10].integer.value;
                hpx2->pci_exp_devctl_or     = fields[11].integer.value;
                hpx2->pci_exp_lnkctl_and    = fields[12].integer.value;
                hpx2->pci_exp_lnkctl_or     = fields[13].integer.value;
                hpx2->sec_unc_err_sever_and = fields[14].integer.value;
                hpx2->sec_unc_err_sever_or  = fields[15].integer.value;
                hpx2->sec_unc_err_mask_and  = fields[16].integer.value;
                hpx2->sec_unc_err_mask_or   = fields[17].integer.value;
                break;
        default:
                pr_warn("%s: Type 2 Revision %d record not supported\n",
                        __func__, revision);
                return AE_ERROR;
        }
        return AE_OK;
}
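
/*
 * Illustrative ASL sketch (assumed firmware side, values made up): a Type 2
 * _HPX record is an 18-element package -- {type, revision} followed by the
 * eight AND/OR pairs decoded above, in the same order:
 *
 *	Name (_HPX, Package () {
 *		Package () {
 *			0x02, 0x01,			// Type 2, Revision 1
 *			0xffffffff, 0x00000000,		// Uncorr. Error Mask
 *			0xffffffff, 0x00462030,		// Uncorr. Error Severity
 *			0xffffffff, 0x00002000,		// Corr. Error Mask
 *			0xffffffff, 0x00000000,		// Adv. Err. Cap. & Ctl.
 *			0xfff7, 0x0004,			// Device Control
 *			0xffff, 0x0000,			// Link Control
 *			0xffffffff, 0x00000000,		// Sec. Uncorr. Err. Severity
 *			0xffffffff, 0x00000000		// Sec. Uncorr. Err. Mask
 *		}
 *	})
 */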

/* _HPX PCI Express Setting Record (Type 3) */
struct hpx_type3 {
        u16 device_type;
        u16 function_type;
        u16 config_space_location;
        u16 pci_exp_cap_id;
        u16 pci_exp_cap_ver;
        u16 pci_exp_vendor_id;
        u16 dvsec_id;
        u16 dvsec_rev;
        u16 match_offset;
        u32 match_mask_and;
        u32 match_value;
        u16 reg_offset;
        u32 reg_mask_and;
        u32 reg_mask_or;
};

enum hpx_type3_dev_type {
        HPX_TYPE_ENDPOINT    = BIT(0),
        HPX_TYPE_LEG_END     = BIT(1),
        HPX_TYPE_RC_END      = BIT(2),
        HPX_TYPE_RC_EC       = BIT(3),
        HPX_TYPE_ROOT_PORT   = BIT(4),
        HPX_TYPE_UPSTREAM    = BIT(5),
        HPX_TYPE_DOWNSTREAM  = BIT(6),
        HPX_TYPE_PCI_BRIDGE  = BIT(7),
        HPX_TYPE_PCIE_BRIDGE = BIT(8),
};

static u16 hpx3_device_type(struct pci_dev *dev)
{
        u16 pcie_type = pci_pcie_type(dev);
        static const int pcie_to_hpx3_type[] = {
                [PCI_EXP_TYPE_ENDPOINT]    = HPX_TYPE_ENDPOINT,
                [PCI_EXP_TYPE_LEG_END]     = HPX_TYPE_LEG_END,
                [PCI_EXP_TYPE_RC_END]      = HPX_TYPE_RC_END,
                [PCI_EXP_TYPE_RC_EC]       = HPX_TYPE_RC_EC,
                [PCI_EXP_TYPE_ROOT_PORT]   = HPX_TYPE_ROOT_PORT,
                [PCI_EXP_TYPE_UPSTREAM]    = HPX_TYPE_UPSTREAM,
                [PCI_EXP_TYPE_DOWNSTREAM]  = HPX_TYPE_DOWNSTREAM,
                [PCI_EXP_TYPE_PCI_BRIDGE]  = HPX_TYPE_PCI_BRIDGE,
                [PCI_EXP_TYPE_PCIE_BRIDGE] = HPX_TYPE_PCIE_BRIDGE,
        };

        if (pcie_type >= ARRAY_SIZE(pcie_to_hpx3_type))
                return 0;

        return pcie_to_hpx3_type[pcie_type];
}

enum hpx_type3_fn_type {
        HPX_FN_NORMAL     = BIT(0),
        HPX_FN_SRIOV_PHYS = BIT(1),
        HPX_FN_SRIOV_VIRT = BIT(2),
};

static u8 hpx3_function_type(struct pci_dev *dev)
{
        if (dev->is_virtfn)
                return HPX_FN_SRIOV_VIRT;
        else if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV) > 0)
                return HPX_FN_SRIOV_PHYS;
        else
                return HPX_FN_NORMAL;
}

static bool hpx3_cap_ver_matches(u8 pcie_cap_id, u8 hpx3_cap_id)
{
        u8 cap_ver = hpx3_cap_id & 0xf;

        if ((hpx3_cap_id & BIT(4)) && cap_ver >= pcie_cap_id)
                return true;
        else if (cap_ver == pcie_cap_id)
                return true;

        return false;
}
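
/*
 * Worked example for hpx3_cap_ver_matches(): a record value hpx3_cap_id of
 * 0x12 has BIT(4) set and cap_ver == 2, so the function returns true for
 * any capability version pcie_cap_id <= 2.  Without BIT(4) (i.e. 0x02),
 * only an exact match (pcie_cap_id == 2) is accepted.
 */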

enum hpx_type3_cfg_loc {
        HPX_CFG_PCICFG       = 0,
        HPX_CFG_PCIE_CAP     = 1,
        HPX_CFG_PCIE_CAP_EXT = 2,
        HPX_CFG_VEND_CAP     = 3,
        HPX_CFG_DVSEC        = 4,
        HPX_CFG_MAX,
};

static void program_hpx_type3_register(struct pci_dev *dev,
                                       const struct hpx_type3 *reg)
{
        u32 match_reg, write_reg, header, orig_value;
        u16 pos;

        if (!(hpx3_device_type(dev) & reg->device_type))
                return;

        if (!(hpx3_function_type(dev) & reg->function_type))
                return;

        switch (reg->config_space_location) {
        case HPX_CFG_PCICFG:
                pos = 0;
                break;
        case HPX_CFG_PCIE_CAP:
                pos = pci_find_capability(dev, reg->pci_exp_cap_id);
                if (pos == 0)
                        return;

                break;
        case HPX_CFG_PCIE_CAP_EXT:
                pos = pci_find_ext_capability(dev, reg->pci_exp_cap_id);
                if (pos == 0)
                        return;

                pci_read_config_dword(dev, pos, &header);
                if (!hpx3_cap_ver_matches(PCI_EXT_CAP_VER(header),
                                          reg->pci_exp_cap_ver))
                        return;

                break;
        case HPX_CFG_VEND_CAP:
        case HPX_CFG_DVSEC:
        default:
                pci_warn(dev, "Encountered _HPX type 3 with unsupported config space location\n");
                return;
        }

        pci_read_config_dword(dev, pos + reg->match_offset, &match_reg);

        if ((match_reg & reg->match_mask_and) != reg->match_value)
                return;

        pci_read_config_dword(dev, pos + reg->reg_offset, &write_reg);
        orig_value = write_reg;
        write_reg &= reg->reg_mask_and;
        write_reg |= reg->reg_mask_or;

        if (orig_value == write_reg)
                return;

        pci_write_config_dword(dev, pos + reg->reg_offset, write_reg);

        pci_dbg(dev, "Applied _HPX3 at [0x%x]: 0x%08x -> 0x%08x\n",
                pos, orig_value, write_reg);
}

static void program_hpx_type3(struct pci_dev *dev, struct hpx_type3 *hpx)
{
        if (!hpx)
                return;

        if (!pci_is_pcie(dev))
                return;

        program_hpx_type3_register(dev, hpx);
}

static void parse_hpx3_register(struct hpx_type3 *hpx3_reg,
                                union acpi_object *reg_fields)
{
        hpx3_reg->device_type           = reg_fields[0].integer.value;
        hpx3_reg->function_type         = reg_fields[1].integer.value;
        hpx3_reg->config_space_location = reg_fields[2].integer.value;
        hpx3_reg->pci_exp_cap_id        = reg_fields[3].integer.value;
        hpx3_reg->pci_exp_cap_ver       = reg_fields[4].integer.value;
        hpx3_reg->pci_exp_vendor_id     = reg_fields[5].integer.value;
        hpx3_reg->dvsec_id              = reg_fields[6].integer.value;
        hpx3_reg->dvsec_rev             = reg_fields[7].integer.value;
        hpx3_reg->match_offset          = reg_fields[8].integer.value;
        hpx3_reg->match_mask_and        = reg_fields[9].integer.value;
        hpx3_reg->match_value           = reg_fields[10].integer.value;
        hpx3_reg->reg_offset            = reg_fields[11].integer.value;
        hpx3_reg->reg_mask_and          = reg_fields[12].integer.value;
        hpx3_reg->reg_mask_or           = reg_fields[13].integer.value;
}

static acpi_status program_type3_hpx_record(struct pci_dev *dev,
                                            union acpi_object *record)
{
        union acpi_object *fields = record->package.elements;
        u32 desc_count, expected_length, revision;
        union acpi_object *reg_fields;
        struct hpx_type3 hpx3;
        int i;

        revision = fields[1].integer.value;
        switch (revision) {
        case 1:
                desc_count = fields[2].integer.value;
                expected_length = 3 + desc_count * 14;

                if (record->package.count != expected_length)
                        return AE_ERROR;

                for (i = 2; i < expected_length; i++)
                        if (fields[i].type != ACPI_TYPE_INTEGER)
                                return AE_ERROR;

                for (i = 0; i < desc_count; i++) {
                        reg_fields = fields + 3 + i * 14;
                        parse_hpx3_register(&hpx3, reg_fields);
                        program_hpx_type3(dev, &hpx3);
                }

                break;
        default:
                pr_warn("%s: Type 3 Revision %d record not supported\n",
                        __func__, revision);
                return AE_ERROR;
        }
        return AE_OK;
}
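
/*
 * Illustrative layout of one Type 3 register descriptor (14 integers per
 * descriptor, in the order consumed by parse_hpx3_register() above):
 *
 *	{ device_type, function_type, config_space_location,
 *	  pci_exp_cap_id, pci_exp_cap_ver, pci_exp_vendor_id,
 *	  dvsec_id, dvsec_rev, match_offset, match_mask_and, match_value,
 *	  reg_offset, reg_mask_and, reg_mask_or }
 *
 * A full Type 3 record is { 0x03, revision, descriptor count } followed by
 * that many 14-integer descriptors, which is exactly what
 * program_type3_hpx_record() checks via expected_length = 3 + count * 14.
 */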

static acpi_status acpi_run_hpx(struct pci_dev *dev, acpi_handle handle)
{
        acpi_status status;
        struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
        union acpi_object *package, *record, *fields;
        struct hpx_type0 hpx0;
        struct hpx_type1 hpx1;
        struct hpx_type2 hpx2;
        u32 type;
        int i;

        status = acpi_evaluate_object(handle, "_HPX", NULL, &buffer);
        if (ACPI_FAILURE(status))
                return status;

        package = (union acpi_object *)buffer.pointer;
        if (package->type != ACPI_TYPE_PACKAGE) {
                status = AE_ERROR;
                goto exit;
        }

        for (i = 0; i < package->package.count; i++) {
                record = &package->package.elements[i];
                if (record->type != ACPI_TYPE_PACKAGE) {
                        status = AE_ERROR;
                        goto exit;
                }

                fields = record->package.elements;
                if (fields[0].type != ACPI_TYPE_INTEGER ||
                    fields[1].type != ACPI_TYPE_INTEGER) {
                        status = AE_ERROR;
                        goto exit;
                }

                type = fields[0].integer.value;
                switch (type) {
                case 0:
                        memset(&hpx0, 0, sizeof(hpx0));
                        status = decode_type0_hpx_record(record, &hpx0);
                        if (ACPI_FAILURE(status))
                                goto exit;
                        program_hpx_type0(dev, &hpx0);
                        break;
                case 1:
                        memset(&hpx1, 0, sizeof(hpx1));
                        status = decode_type1_hpx_record(record, &hpx1);
                        if (ACPI_FAILURE(status))
                                goto exit;
                        program_hpx_type1(dev, &hpx1);
                        break;
                case 2:
                        memset(&hpx2, 0, sizeof(hpx2));
                        status = decode_type2_hpx_record(record, &hpx2);
                        if (ACPI_FAILURE(status))
                                goto exit;
                        program_hpx_type2(dev, &hpx2);
                        break;
                case 3:
                        status = program_type3_hpx_record(dev, record);
                        if (ACPI_FAILURE(status))
                                goto exit;
                        break;
                default:
                        pr_err("%s: Type %d record not supported\n",
                               __func__, type);
                        status = AE_ERROR;
                        goto exit;
                }
        }
exit:
        kfree(buffer.pointer);
        return status;
}

static acpi_status acpi_run_hpp(struct pci_dev *dev, acpi_handle handle)
{
        acpi_status status;
        struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
        union acpi_object *package, *fields;
        struct hpx_type0 hpx0;
        int i;

        memset(&hpx0, 0, sizeof(hpx0));

        status = acpi_evaluate_object(handle, "_HPP", NULL, &buffer);
        if (ACPI_FAILURE(status))
                return status;

        package = (union acpi_object *) buffer.pointer;
        if (package->type != ACPI_TYPE_PACKAGE ||
            package->package.count != 4) {
                status = AE_ERROR;
                goto exit;
        }

        fields = package->package.elements;
        for (i = 0; i < 4; i++) {
                if (fields[i].type != ACPI_TYPE_INTEGER) {
                        status = AE_ERROR;
                        goto exit;
                }
        }

        hpx0.revision        = 1;
        hpx0.cache_line_size = fields[0].integer.value;
        hpx0.latency_timer   = fields[1].integer.value;
        hpx0.enable_serr     = fields[2].integer.value;
        hpx0.enable_perr     = fields[3].integer.value;

        program_hpx_type0(dev, &hpx0);

exit:
        kfree(buffer.pointer);
        return status;
}
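
/*
 * Illustrative ASL sketch (assumed firmware side): _HPP is the legacy,
 * fixed four-integer form -- {cache line size, latency timer, enable SERR,
 * enable PERR} -- which acpi_run_hpp() maps onto a Type 0 record:
 *
 *	Name (_HPP, Package () { 0x08, 0x40, 0x00, 0x00 })
 */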

/**
 * pci_acpi_program_hp_params - program hotplug parameters from ACPI
 * @dev: the pci_dev for which we want parameters
 */
int pci_acpi_program_hp_params(struct pci_dev *dev)
{
        acpi_status status;
        acpi_handle handle, phandle;
        struct pci_bus *pbus;

        if (acpi_pci_disabled)
                return -ENODEV;

        handle = NULL;
        for (pbus = dev->bus; pbus; pbus = pbus->parent) {
                handle = acpi_pci_get_bridge_handle(pbus);
                if (handle)
                        break;
        }

        /*
         * _HPP settings apply to all child buses, until another _HPP is
         * encountered.  If we don't find an _HPP for the input pci dev,
         * look for it in the parent device scope since that would apply to
         * this pci dev.
         */
        while (handle) {
                status = acpi_run_hpx(dev, handle);
                if (ACPI_SUCCESS(status))
                        return 0;
                status = acpi_run_hpp(dev, handle);
                if (ACPI_SUCCESS(status))
                        return 0;
                if (acpi_is_root_bridge(handle))
                        break;
                status = acpi_get_parent(handle, &phandle);
                if (ACPI_FAILURE(status))
                        break;
                handle = phandle;
        }
        return -ENODEV;
}
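
/*
 * Usage sketch (assumed caller, abridged): the PCI core applies these
 * hotplug parameters while configuring a newly enumerated device, roughly:
 *
 *	static void pci_configure_device(struct pci_dev *dev)
 *	{
 *		...
 *		pci_acpi_program_hp_params(dev);	// _HPX first, then _HPP
 *	}
 *
 * Note the precedence encoded above: _HPX is tried before the legacy _HPP
 * at every level of the namespace walk.
 */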

/**
 * pciehp_is_native - Check whether a hotplug port is handled by the OS
 * @bridge: Hotplug port to check
 *
 * Returns true if the given @bridge is handled by the native PCIe hotplug
 * driver.
 */
bool pciehp_is_native(struct pci_dev *bridge)
{
        const struct pci_host_bridge *host;
        u32 slot_cap;

        if (!IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE))
                return false;

        pcie_capability_read_dword(bridge, PCI_EXP_SLTCAP, &slot_cap);
        if (!(slot_cap & PCI_EXP_SLTCAP_HPC))
                return false;

        if (pcie_ports_native)
                return true;

        host = pci_find_host_bridge(bridge->bus);
        return host->native_pcie_hotplug;
}

/**
 * shpchp_is_native - Check whether a hotplug port is handled by the OS
 * @bridge: Hotplug port to check
 *
 * Returns true if the given @bridge is handled by the native SHPC hotplug
 * driver.
 */
bool shpchp_is_native(struct pci_dev *bridge)
{
        return bridge->shpc_managed;
}

/**
 * pci_acpi_wake_bus - Root bus wakeup notification fork function.
 * @context: Device wakeup context.
 */
static void pci_acpi_wake_bus(struct acpi_device_wakeup_context *context)
{
        struct acpi_device *adev;
        struct acpi_pci_root *root;

        adev = container_of(context, struct acpi_device, wakeup.context);
        root = acpi_driver_data(adev);
        pci_pme_wakeup_bus(root->bus);
}

/**
 * pci_acpi_wake_dev - PCI device wakeup notification work function.
 * @context: Device wakeup context.
 */
static void pci_acpi_wake_dev(struct acpi_device_wakeup_context *context)
{
        struct pci_dev *pci_dev;

        pci_dev = to_pci_dev(context->dev);

        if (pci_dev->pme_poll)
                pci_dev->pme_poll = false;

        if (pci_dev->current_state == PCI_D3cold) {
                pci_wakeup_event(pci_dev);
                pm_request_resume(&pci_dev->dev);
                return;
        }

        /* Clear PME Status if set. */
        if (pci_dev->pme_support)
                pci_check_pme_status(pci_dev);

        pci_wakeup_event(pci_dev);
        pm_request_resume(&pci_dev->dev);

        pci_pme_wakeup_bus(pci_dev->subordinate);
}

/**
 * pci_acpi_add_bus_pm_notifier - Register PM notifier for root PCI bus.
 * @dev: PCI root bridge ACPI device.
 */
acpi_status pci_acpi_add_bus_pm_notifier(struct acpi_device *dev)
{
        return acpi_add_pm_notifier(dev, NULL, pci_acpi_wake_bus);
}

/**
 * pci_acpi_add_pm_notifier - Register PM notifier for given PCI device.
 * @dev: ACPI device to add the notifier for.
 * @pci_dev: PCI device to check for the PME status if an event is signaled.
 */
acpi_status pci_acpi_add_pm_notifier(struct acpi_device *dev,
                                     struct pci_dev *pci_dev)
{
        return acpi_add_pm_notifier(dev, &pci_dev->dev, pci_acpi_wake_dev);
}

/*
 * _SxD returns the D-state with the highest power
 * (lowest D-state number) supported in the S-state "x".
 *
 * If the device does not have a _PRW
 * (Power Resources for Wake) supporting system wakeup from "x",
 * then the OS is free to choose a lower power (higher number
 * D-state) than the return value from _SxD.
 *
 * But if _PRW is enabled at S-state "x", the OS
 * must not choose a power lower than _SxD --
 * unless the device has an _SxW method specifying
 * the lowest power (highest D-state number) the device
 * may enter while still able to wake the system.
 *
 * I.e., depending on global OS policy:
 *
 * if (_PRW at S-state x)
 *	choose from highest power _SxD to lowest power _SxW
 * else // no _PRW at S-state x
 *	choose highest power _SxD or any lower power
 */

pci_power_t acpi_pci_choose_state(struct pci_dev *pdev)
{
        int acpi_state, d_max;

        if (pdev->no_d3cold || !pdev->d3cold_allowed)
                d_max = ACPI_STATE_D3_HOT;
        else
                d_max = ACPI_STATE_D3_COLD;
        acpi_state = acpi_pm_device_sleep_state(&pdev->dev, NULL, d_max);
        if (acpi_state < 0)
                return PCI_POWER_ERROR;

        switch (acpi_state) {
        case ACPI_STATE_D0:
                return PCI_D0;
        case ACPI_STATE_D1:
                return PCI_D1;
        case ACPI_STATE_D2:
                return PCI_D2;
        case ACPI_STATE_D3_HOT:
                return PCI_D3hot;
        case ACPI_STATE_D3_COLD:
                return PCI_D3cold;
        }
        return PCI_POWER_ERROR;
}

static struct acpi_device *acpi_pci_find_companion(struct device *dev);

void pci_set_acpi_fwnode(struct pci_dev *dev)
{
        if (!dev_fwnode(&dev->dev) && !pci_dev_is_added(dev))
                ACPI_COMPANION_SET(&dev->dev,
                                   acpi_pci_find_companion(&dev->dev));
}

/**
 * pci_dev_acpi_reset - do a function level reset using _RST method
 * @dev: device to reset
 * @probe: if true, return 0 if device supports _RST
 */
int pci_dev_acpi_reset(struct pci_dev *dev, bool probe)
{
        acpi_handle handle = ACPI_HANDLE(&dev->dev);

        if (!handle || !acpi_has_method(handle, "_RST"))
                return -ENOTTY;

        if (probe)
                return 0;

        if (ACPI_FAILURE(acpi_evaluate_object(handle, "_RST", NULL, NULL))) {
                pci_warn(dev, "ACPI _RST failed\n");
                return -ENOTTY;
        }

        return 0;
}

bool acpi_pci_power_manageable(struct pci_dev *dev)
{
        struct acpi_device *adev = ACPI_COMPANION(&dev->dev);

        return adev && acpi_device_power_manageable(adev);
}

bool acpi_pci_bridge_d3(struct pci_dev *dev)
{
        struct pci_dev *rpdev;
        struct acpi_device *adev, *rpadev;
        const union acpi_object *obj;

        if (acpi_pci_disabled || !dev->is_hotplug_bridge)
                return false;

        adev = ACPI_COMPANION(&dev->dev);
        if (adev) {
                /*
                 * If the bridge has _S0W, whether or not it can go into D3
                 * depends on what is returned by that object.  In particular,
                 * if the power state returned by _S0W is D2 or shallower,
                 * entering D3 should not be allowed.
                 */
                if (acpi_dev_power_state_for_wake(adev) <= ACPI_STATE_D2)
                        return false;

                /*
                 * Otherwise, assume that the bridge can enter D3 so long as it
                 * is power-manageable via ACPI.
                 */
                if (acpi_device_power_manageable(adev))
                        return true;
        }

        rpdev = pcie_find_root_port(dev);
        if (!rpdev)
                return false;

        if (rpdev == dev)
                rpadev = adev;
        else
                rpadev = ACPI_COMPANION(&rpdev->dev);

        if (!rpadev)
                return false;

        /*
         * If the Root Port cannot signal wakeup signals at all, i.e., it
         * doesn't supply a wakeup GPE via _PRW, it cannot signal hotplug
         * events from low-power states including D3hot and D3cold.
         */
        if (!rpadev->wakeup.flags.valid)
                return false;

        /*
         * In the bridge-below-a-Root-Port case, evaluate _S0W for the Root
         * Port to verify whether or not it can signal wakeup from D3.
         */
        if (rpadev != adev &&
            acpi_dev_power_state_for_wake(rpadev) <= ACPI_STATE_D2)
                return false;

        /*
         * The "HotPlugSupportInD3" property in a Root Port _DSD indicates
         * the Port can signal hotplug events while in D3.  We assume any
         * bridges *below* that Root Port can also signal hotplug events
         * while in D3.
         */
        if (!acpi_dev_get_property(rpadev, "HotPlugSupportInD3",
                                   ACPI_TYPE_INTEGER, &obj) &&
            obj->integer.value == 1)
                return true;

        return false;
}
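
/*
 * Illustrative ASL sketch (assumed firmware side, _DSD UUID elided): a Root
 * Port advertising hotplug support in D3 carries the property checked above
 * as a _DSD entry along these lines:
 *
 *	Name (_DSD, Package () {
 *		ToUUID ("..."),		// device-properties UUID
 *		Package () {
 *			Package () { "HotPlugSupportInD3", 1 }
 *		}
 *	})
 */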

static void acpi_pci_config_space_access(struct pci_dev *dev, bool enable)
{
        int val = enable ? ACPI_REG_CONNECT : ACPI_REG_DISCONNECT;
        int ret = acpi_evaluate_reg(ACPI_HANDLE(&dev->dev),
                                    ACPI_ADR_SPACE_PCI_CONFIG, val);
        if (ret)
                pci_dbg(dev, "ACPI _REG %s evaluation failed (%d)\n",
                        enable ? "connect" : "disconnect", ret);
}

int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
        struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
        static const u8 state_conv[] = {
                [PCI_D0]     = ACPI_STATE_D0,
                [PCI_D1]     = ACPI_STATE_D1,
                [PCI_D2]     = ACPI_STATE_D2,
                [PCI_D3hot]  = ACPI_STATE_D3_HOT,
                [PCI_D3cold] = ACPI_STATE_D3_COLD,
        };
        int error;

        /* If the ACPI device has _EJ0, ignore the device */
        if (!adev || acpi_has_method(adev->handle, "_EJ0"))
                return -ENODEV;

        switch (state) {
        case PCI_D0:
        case PCI_D1:
        case PCI_D2:
        case PCI_D3hot:
        case PCI_D3cold:
                break;
        default:
                return -EINVAL;
        }

        if (state == PCI_D3cold) {
                if (dev_pm_qos_flags(&dev->dev, PM_QOS_FLAG_NO_POWER_OFF) ==
                    PM_QOS_FLAGS_ALL)
                        return -EBUSY;

                /* Notify AML lack of PCI config space availability */
                acpi_pci_config_space_access(dev, false);
        }

        error = acpi_device_set_power(adev, state_conv[state]);
        if (error)
                return error;

        pci_dbg(dev, "power state changed by ACPI to %s\n",
                acpi_power_state_string(adev->power.state));

        /*
         * Notify AML of PCI config space availability.  Config space is
         * accessible in all states except D3cold; the only transitions
         * that change availability are transitions to D3cold and from
         * D3cold to D0.
         */
        if (state == PCI_D0)
                acpi_pci_config_space_access(dev, true);

        return 0;
}

pci_power_t acpi_pci_get_power_state(struct pci_dev *dev)
{
        struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
        static const pci_power_t state_conv[] = {
                [ACPI_STATE_D0]      = PCI_D0,
                [ACPI_STATE_D1]      = PCI_D1,
                [ACPI_STATE_D2]      = PCI_D2,
                [ACPI_STATE_D3_HOT]  = PCI_D3hot,
                [ACPI_STATE_D3_COLD] = PCI_D3cold,
        };
        int state;

        if (!adev || !acpi_device_power_manageable(adev))
                return PCI_UNKNOWN;

        state = adev->power.state;
        if (state == ACPI_STATE_UNKNOWN)
                return PCI_UNKNOWN;

        return state_conv[state];
}

void acpi_pci_refresh_power_state(struct pci_dev *dev)
{
        struct acpi_device *adev = ACPI_COMPANION(&dev->dev);

        if (adev && acpi_device_power_manageable(adev))
                acpi_device_update_power(adev, NULL);
}

static int acpi_pci_propagate_wakeup(struct pci_bus *bus, bool enable)
{
        while (bus->parent) {
                if (acpi_pm_device_can_wakeup(&bus->self->dev))
                        return acpi_pm_set_device_wakeup(&bus->self->dev,
                                                         enable);

                bus = bus->parent;
        }

        /* We have reached the root bus. */
        if (bus->bridge) {
                if (acpi_pm_device_can_wakeup(bus->bridge))
                        return acpi_pm_set_device_wakeup(bus->bridge, enable);
        }
        return 0;
}

int acpi_pci_wakeup(struct pci_dev *dev, bool enable)
{
        if (acpi_pci_disabled)
                return 0;

        if (acpi_pm_device_can_wakeup(&dev->dev))
                return acpi_pm_set_device_wakeup(&dev->dev, enable);

        return acpi_pci_propagate_wakeup(dev->bus, enable);
}

bool acpi_pci_need_resume(struct pci_dev *dev)
{
        struct acpi_device *adev;

        if (acpi_pci_disabled)
                return false;

        /*
         * In some cases (e.g. Samsung 305V4A) leaving a bridge in suspend
         * over system-wide suspend/resume confuses the platform firmware,
         * so avoid doing that.  According to Section 16.1.6 of ACPI 6.2,
         * endpoint devices are expected to be in D3 before invoking the S3
         * entry path from the firmware, so they should not be affected by
         * this issue.
         */
        if (pci_is_bridge(dev) && acpi_target_system_state() != ACPI_STATE_S0)
                return true;

        adev = ACPI_COMPANION(&dev->dev);
        if (!adev || !acpi_device_power_manageable(adev))
                return false;

        if (adev->wakeup.flags.valid &&
            device_may_wakeup(&dev->dev) != !!adev->wakeup.prepare_count)
                return true;

        if (acpi_target_system_state() == ACPI_STATE_S0)
                return false;

        return !!adev->power.flags.dsw_present;
}

void acpi_pci_add_bus(struct pci_bus *bus)
{
        union acpi_object *obj;
        struct pci_host_bridge *bridge;

        if (acpi_pci_disabled || !bus->bridge || !ACPI_HANDLE(bus->bridge))
                return;

        acpi_pci_slot_enumerate(bus);
        acpiphp_enumerate_slots(bus);

        /*
         * For a host bridge, check its _DSM for function 8 and if
         * that is available, mark it in pci_host_bridge.
         */
        if (!pci_is_root_bus(bus))
                return;

        obj = acpi_evaluate_dsm_typed(ACPI_HANDLE(bus->bridge),
                                      &pci_acpi_dsm_guid, 3,
                                      DSM_PCI_POWER_ON_RESET_DELAY, NULL,
                                      ACPI_TYPE_INTEGER);
        if (!obj)
                return;

        if (obj->integer.value == 1) {
                bridge = pci_find_host_bridge(bus);
                bridge->ignore_reset_delay = 1;
        }
        ACPI_FREE(obj);
}

void acpi_pci_remove_bus(struct pci_bus *bus)
{
        if (acpi_pci_disabled || !bus->bridge)
                return;

        acpiphp_remove_slots(bus);
        acpi_pci_slot_remove(bus);
}

/* ACPI bus type */

static DECLARE_RWSEM(pci_acpi_companion_lookup_sem);
static struct acpi_device *(*pci_acpi_find_companion_hook)(struct pci_dev *);

/**
 * pci_acpi_set_companion_lookup_hook - Set ACPI companion lookup callback.
 * @func: ACPI companion lookup callback pointer or NULL.
 *
 * Set a special ACPI companion lookup callback for PCI devices whose companion
 * objects in the ACPI namespace have _ADR with non-standard bus-device-function
 * encodings.
 *
 * Return 0 on success or a negative error code on failure (in which case no
 * changes are made).
 *
 * The caller is responsible for the appropriate ordering of the invocations of
 * this function with respect to the enumeration of the PCI devices needing the
 * callback installed by it.
 */
int pci_acpi_set_companion_lookup_hook(struct acpi_device *(*func)(struct pci_dev *))
{
        int ret;

        if (!func)
                return -EINVAL;

        down_write(&pci_acpi_companion_lookup_sem);

        if (pci_acpi_find_companion_hook) {
                ret = -EBUSY;
        } else {
                pci_acpi_find_companion_hook = func;
                ret = 0;
        }

        up_write(&pci_acpi_companion_lookup_sem);

        return ret;
}
EXPORT_SYMBOL_GPL(pci_acpi_set_companion_lookup_hook);

/**
 * pci_acpi_clear_companion_lookup_hook - Clear ACPI companion lookup callback.
 *
 * Clear the special ACPI companion lookup callback previously set by
 * pci_acpi_set_companion_lookup_hook().  Block until the last running instance
 * of the callback returns before clearing it.
 *
 * The caller is responsible for the appropriate ordering of the invocations of
 * this function with respect to the enumeration of the PCI devices needing the
 * callback cleared by it.
 */
void pci_acpi_clear_companion_lookup_hook(void)
{
        down_write(&pci_acpi_companion_lookup_sem);

        pci_acpi_find_companion_hook = NULL;

        up_write(&pci_acpi_companion_lookup_sem);
}
EXPORT_SYMBOL_GPL(pci_acpi_clear_companion_lookup_hook);

static struct acpi_device *acpi_pci_find_companion(struct device *dev)
{
        struct pci_dev *pci_dev = to_pci_dev(dev);
        struct acpi_device *adev;
        bool check_children;
        u64 addr;

        if (!dev->parent)
                return NULL;

        down_read(&pci_acpi_companion_lookup_sem);

        adev = pci_acpi_find_companion_hook ?
               pci_acpi_find_companion_hook(pci_dev) : NULL;

        up_read(&pci_acpi_companion_lookup_sem);

        if (adev)
                return adev;

        check_children = pci_is_bridge(pci_dev);
        /* See the ACPI spec for the syntax of _ADR */
        addr = (PCI_SLOT(pci_dev->devfn) << 16) | PCI_FUNC(pci_dev->devfn);
        adev = acpi_find_child_device(ACPI_COMPANION(dev->parent), addr,
                                      check_children);

        /*
         * There may be ACPI device objects in the ACPI namespace that are
         * children of the device object representing the host bridge, but
         * don't represent PCI devices.  Both _HID and _ADR may be present
         * for them, even though that is against the specification (for
         * example, see Section 6.1 of ACPI 6.3), but in many cases the _ADR
         * returns 0, which appears to indicate that they should not be
         * taken into consideration as potential companions of PCI devices
         * on the root bus.
         *
         * To catch this special case, disregard the returned device object
         * if it has a valid _HID, addr is 0 and the PCI device at hand is
         * on the root bus.
         */
        if (adev && adev->pnp.type.platform_id && !addr &&
            pci_is_root_bus(pci_dev->bus))
                return NULL;

        return adev;
}

/**
 * pci_acpi_optimize_delay - optimize PCI D3 and D3cold delay from ACPI
 * @pdev: the PCI device whose delay is to be updated
 * @handle: ACPI handle of this device
 *
 * Update the d3hot_delay and d3cold_delay of a PCI device from the ACPI _DSM
 * control method of either the device itself or the PCI host bridge.
 *
 * Function 8, "Reset Delay," applies to the entire hierarchy below a PCI
 * host bridge.  If it returns one, the OS may assume that all devices in
 * the hierarchy have already completed power-on reset delays.
 *
 * Function 9, "Device Readiness Durations," applies only to the object
 * where it is located.  It returns delay durations required after various
 * events if the device requires less time than the spec requires.  Delays
 * from this function take precedence over the Reset Delay function.
 *
 * These _DSM functions are defined by the draft ECN of January 28, 2014,
 * titled "ACPI additions for FW latency optimizations."
 */
static void pci_acpi_optimize_delay(struct pci_dev *pdev,
                                    acpi_handle handle)
{
        struct pci_host_bridge *bridge = pci_find_host_bridge(pdev->bus);
        int value;
        union acpi_object *obj, *elements;

        if (bridge->ignore_reset_delay)
                pdev->d3cold_delay = 0;

        obj = acpi_evaluate_dsm_typed(handle, &pci_acpi_dsm_guid, 3,
                                      DSM_PCI_DEVICE_READINESS_DURATIONS, NULL,
                                      ACPI_TYPE_PACKAGE);
        if (!obj)
                return;

        if (obj->package.count == 5) {
                elements = obj->package.elements;
                if (elements[0].type == ACPI_TYPE_INTEGER) {
                        value = (int)elements[0].integer.value / 1000;
                        if (value < PCI_PM_D3COLD_WAIT)
                                pdev->d3cold_delay = value;
                }
                if (elements[3].type == ACPI_TYPE_INTEGER) {
                        value = (int)elements[3].integer.value / 1000;
                        if (value < PCI_PM_D3HOT_WAIT)
                                pdev->d3hot_delay = value;
                }
        }
        ACPI_FREE(obj);
}
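
/*
 * Worked example for pci_acpi_optimize_delay() (values are made up): if
 * Function 9 returns the five-element package
 *
 *	Package () { 10000, 0, 0, 5000, 0 }	// durations in microseconds
 *
 * then element 0 yields 10000 / 1000 = 10 ms, which is below
 * PCI_PM_D3COLD_WAIT, so d3cold_delay becomes 10; element 3 yields 5 ms,
 * below PCI_PM_D3HOT_WAIT, so d3hot_delay becomes 5.
 */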

static void pci_acpi_set_external_facing(struct pci_dev *dev)
{
        u8 val;

        if (pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
                return;
        if (device_property_read_u8(&dev->dev, "ExternalFacingPort", &val))
                return;

        /*
         * These root ports expose PCIe (including DMA) outside of the
         * system.  Everything downstream from them is external.
         */
        if (val)
                dev->external_facing = 1;
}

void pci_acpi_setup(struct device *dev, struct acpi_device *adev)
{
        struct pci_dev *pci_dev = to_pci_dev(dev);

        pci_acpi_optimize_delay(pci_dev, adev->handle);
        pci_acpi_set_external_facing(pci_dev);
        pci_acpi_add_edr_notifier(pci_dev);

        pci_acpi_add_pm_notifier(adev, pci_dev);
        if (!adev->wakeup.flags.valid)
                return;

        device_set_wakeup_capable(dev, true);
        /*
         * For bridges that can do D3 we enable wake automatically (as
         * we do for the power management itself in that case).  The
         * reason is that the bridge may have additional methods such as
         * _DSW that need to be called.
         */
        if (pci_dev->bridge_d3)
                device_wakeup_enable(dev);

        acpi_pci_wakeup(pci_dev, false);
        acpi_device_power_add_dependent(adev, dev);

        if (pci_is_bridge(pci_dev))
                acpi_dev_power_up_children_with_adr(adev);
}

void pci_acpi_cleanup(struct device *dev, struct acpi_device *adev)
{
        struct pci_dev *pci_dev = to_pci_dev(dev);

        pci_acpi_remove_edr_notifier(pci_dev);
        pci_acpi_remove_pm_notifier(adev);
        if (adev->wakeup.flags.valid) {
                acpi_device_power_remove_dependent(adev, dev);
                if (pci_dev->bridge_d3)
                        device_wakeup_disable(dev);

                device_set_wakeup_capable(dev, false);
        }
}

static struct fwnode_handle *(*pci_msi_get_fwnode_cb)(struct device *dev);

/**
 * pci_msi_register_fwnode_provider - Register callback to retrieve fwnode
 * @fn: Callback matching a device to a fwnode that identifies a PCI
 *      MSI domain.
 *
 * This should be called by the irqchip driver, which is the parent of the
 * MSI domain, to provide a callback interface for querying the fwnode.
 */
void
pci_msi_register_fwnode_provider(struct fwnode_handle *(*fn)(struct device *))
{
        pci_msi_get_fwnode_cb = fn;
}

/**
 * pci_host_bridge_acpi_msi_domain - Retrieve MSI domain of a PCI host bridge
 * @bus: The PCI host bridge bus.
 *
 * This function uses the callback function registered by
 * pci_msi_register_fwnode_provider() to retrieve the irq_domain with
 * type DOMAIN_BUS_PCI_MSI of the specified host bridge bus.
 * This returns NULL on error or when the domain is not found.
 */
struct irq_domain *pci_host_bridge_acpi_msi_domain(struct pci_bus *bus)
{
        struct fwnode_handle *fwnode;

        if (!pci_msi_get_fwnode_cb)
                return NULL;

        fwnode = pci_msi_get_fwnode_cb(&bus->dev);
        if (!fwnode)
                return NULL;

        return irq_find_matching_fwnode(fwnode, DOMAIN_BUS_PCI_MSI);
}

static int __init acpi_pci_init(void)
{
        if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_MSI) {
                pr_info("ACPI FADT declares the system doesn't support MSI, so disable it\n");
                pci_no_msi();
        }

        if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_ASPM) {
                pr_info("ACPI FADT declares the system doesn't support PCIe ASPM, so disable it\n");
                pcie_no_aspm();
        }

        if (acpi_pci_disabled)
                return 0;

        acpi_pci_slot_init();
        acpiphp_init();

        return 0;
}
arch_initcall(acpi_pci_init);