// SPDX-License-Identifier: GPL-2.0
/*
 * PCI support in ACPI
 *
 * Copyright (C) 2005 David Shaohua Li <shaohua.li@intel.com>
 * Copyright (C) 2004 Tom Long Nguyen <tom.l.nguyen@intel.com>
 * Copyright (C) 2004 Intel Corp.
 */

#include <linux/delay.h>
#include <linux/init.h>
#include <linux/irqdomain.h>
#include <linux/pci.h>
#include <linux/msi.h>
#include <linux/pci_hotplug.h>
#include <linux/module.h>
#include <linux/pci-acpi.h>
#include <linux/pci-ecam.h>
#include <linux/pm_runtime.h>
#include <linux/pm_qos.h>
#include <linux/rwsem.h>
#include "pci.h"

/*
 * The GUID is defined in the PCI Firmware Specification available
 * here to PCI-SIG members:
 * https://members.pcisig.com/wg/PCI-SIG/document/15350
 */
const guid_t pci_acpi_dsm_guid =
	GUID_INIT(0xe5c937d0, 0x3553, 0x4d7a,
		  0x91, 0x17, 0xea, 0x4d, 0x19, 0xc3, 0x43, 0x4d);

#if defined(CONFIG_PCI_QUIRKS) && defined(CONFIG_ARM64)
static int acpi_get_rc_addr(struct acpi_device *adev, struct resource *res)
{
	struct device *dev = &adev->dev;
	struct resource_entry *entry;
	struct list_head list;
	unsigned long flags;
	int ret;

	INIT_LIST_HEAD(&list);
	flags = IORESOURCE_MEM;
	ret = acpi_dev_get_resources(adev, &list,
				     acpi_dev_filter_resource_type_cb,
				     (void *) flags);
	if (ret < 0) {
		dev_err(dev, "failed to parse _CRS method, error code %d\n",
			ret);
		return ret;
	}

	if (ret == 0) {
		dev_err(dev, "no IO and memory resources present in _CRS\n");
		return -EINVAL;
	}

	entry = list_first_entry(&list, struct resource_entry, node);
	*res = *entry->res;
	acpi_dev_free_resource_list(&list);
	return 0;
}

static acpi_status acpi_match_rc(acpi_handle handle, u32 lvl, void *context,
				 void **retval)
{
	u16 *segment = context;
	unsigned long long uid;
	acpi_status status;

	status = acpi_evaluate_integer(handle, METHOD_NAME__UID, NULL, &uid);
	if (ACPI_FAILURE(status) || uid != *segment)
		return AE_CTRL_DEPTH;

	*(acpi_handle *)retval = handle;
	return AE_CTRL_TERMINATE;
}

int acpi_get_rc_resources(struct device *dev, const char *hid, u16 segment,
			  struct resource *res)
{
	struct acpi_device *adev;
	acpi_status status;
	acpi_handle handle;
	int ret;

	status = acpi_get_devices(hid, acpi_match_rc, &segment, &handle);
	if (ACPI_FAILURE(status)) {
		dev_err(dev, "can't find _HID %s device to locate resources\n",
			hid);
		return -ENODEV;
	}

	adev = acpi_fetch_acpi_dev(handle);
	if (!adev)
		return -ENODEV;

	ret = acpi_get_rc_addr(adev, res);
	if (ret) {
		dev_err(dev, "can't get resource from %s\n",
			dev_name(&adev->dev));
		return ret;
	}

	return 0;
}
#endif
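
/*
 * Illustrative sketch (not part of this file): a host-controller ACPI quirk
 * would typically use acpi_get_rc_resources() above to look up its vendor
 * specific RC device by _HID and segment, e.g.:
 *
 *	struct resource res;
 *
 *	// "VNDR0001" is a hypothetical _HID, used here only for illustration
 *	if (acpi_get_rc_resources(dev, "VNDR0001", root->segment, &res))
 *		return -ENODEV;
 *	// res now holds the first MEM resource from that device's _CRS
 */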

phys_addr_t acpi_pci_root_get_mcfg_addr(acpi_handle handle)
{
	acpi_status status = AE_NOT_EXIST;
	unsigned long long mcfg_addr;

	if (handle)
		status = acpi_evaluate_integer(handle, METHOD_NAME__CBA,
					       NULL, &mcfg_addr);
	if (ACPI_FAILURE(status))
		return 0;

	return (phys_addr_t)mcfg_addr;
}

bool pci_acpi_preserve_config(struct pci_host_bridge *host_bridge)
{
	bool ret = false;

	if (ACPI_HANDLE(&host_bridge->dev)) {
		union acpi_object *obj;

		/*
		 * Evaluate the "PCI Boot Configuration" _DSM Function. If it
		 * exists and returns 0, we must preserve any PCI resource
		 * assignments made by firmware for this host bridge.
		 */
		obj = acpi_evaluate_dsm_typed(ACPI_HANDLE(&host_bridge->dev),
					      &pci_acpi_dsm_guid,
					      1, DSM_PCI_PRESERVE_BOOT_CONFIG,
					      NULL, ACPI_TYPE_INTEGER);
		if (obj && obj->integer.value == 0)
			ret = true;
		ACPI_FREE(obj);
	}

	return ret;
}

/* _HPX PCI Setting Record (Type 0); same as _HPP */
struct hpx_type0 {
	u32 revision;		/* Not present in _HPP */
	u8  cache_line_size;	/* Not applicable to PCIe */
	u8  latency_timer;	/* Not applicable to PCIe */
	u8  enable_serr;
	u8  enable_perr;
};

static struct hpx_type0 pci_default_type0 = {
	.revision = 1,
	.cache_line_size = 8,
	.latency_timer = 0x40,
	.enable_serr = 0,
	.enable_perr = 0,
};

static void program_hpx_type0(struct pci_dev *dev, struct hpx_type0 *hpx)
{
	u16 pci_cmd, pci_bctl;

	if (!hpx)
		hpx = &pci_default_type0;

	if (hpx->revision > 1) {
		pci_warn(dev, "PCI settings rev %d not supported; using defaults\n",
			 hpx->revision);
		hpx = &pci_default_type0;
	}

	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, hpx->cache_line_size);
	pci_write_config_byte(dev, PCI_LATENCY_TIMER, hpx->latency_timer);
	pci_read_config_word(dev, PCI_COMMAND, &pci_cmd);
	if (hpx->enable_serr)
		pci_cmd |= PCI_COMMAND_SERR;
	if (hpx->enable_perr)
		pci_cmd |= PCI_COMMAND_PARITY;
	pci_write_config_word(dev, PCI_COMMAND, pci_cmd);

	/* Program bridge control value */
	if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
		pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER,
				      hpx->latency_timer);
		pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &pci_bctl);
		if (hpx->enable_perr)
			pci_bctl |= PCI_BRIDGE_CTL_PARITY;
		pci_write_config_word(dev, PCI_BRIDGE_CONTROL, pci_bctl);
	}
}

static acpi_status decode_type0_hpx_record(union acpi_object *record,
					   struct hpx_type0 *hpx0)
{
	int i;
	union acpi_object *fields = record->package.elements;
	u32 revision = fields[1].integer.value;

	switch (revision) {
	case 1:
		if (record->package.count != 6)
			return AE_ERROR;
		for (i = 2; i < 6; i++)
			if (fields[i].type != ACPI_TYPE_INTEGER)
				return AE_ERROR;
		hpx0->revision = revision;
		hpx0->cache_line_size = fields[2].integer.value;
		hpx0->latency_timer = fields[3].integer.value;
		hpx0->enable_serr = fields[4].integer.value;
		hpx0->enable_perr = fields[5].integer.value;
		break;
	default:
		pr_warn("%s: Type 0 Revision %d record not supported\n",
			__func__, revision);
		return AE_ERROR;
	}
	return AE_OK;
}

/* _HPX PCI-X Setting Record (Type 1) */
struct hpx_type1 {
	u32 revision;
	u8  max_mem_read;
	u8  avg_max_split;
	u16 tot_max_split;
};

static void program_hpx_type1(struct pci_dev *dev, struct hpx_type1 *hpx)
{
	int pos;

	if (!hpx)
		return;

	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!pos)
		return;

	pci_warn(dev, "PCI-X settings not supported\n");
}

static acpi_status decode_type1_hpx_record(union acpi_object *record,
					   struct hpx_type1 *hpx1)
{
	int i;
	union acpi_object *fields = record->package.elements;
	u32 revision = fields[1].integer.value;

	switch (revision) {
	case 1:
		if (record->package.count != 5)
			return AE_ERROR;
		for (i = 2; i < 5; i++)
			if (fields[i].type != ACPI_TYPE_INTEGER)
				return AE_ERROR;
		hpx1->revision = revision;
		hpx1->max_mem_read = fields[2].integer.value;
		hpx1->avg_max_split = fields[3].integer.value;
		hpx1->tot_max_split = fields[4].integer.value;
		break;
	default:
		pr_warn("%s: Type 1 Revision %d record not supported\n",
			__func__, revision);
		return AE_ERROR;
	}
	return AE_OK;
}

static bool pcie_root_rcb_set(struct pci_dev *dev)
{
	struct pci_dev *rp = pcie_find_root_port(dev);
	u16 lnkctl;

	if (!rp)
		return false;

	pcie_capability_read_word(rp, PCI_EXP_LNKCTL, &lnkctl);
	if (lnkctl & PCI_EXP_LNKCTL_RCB)
		return true;

	return false;
}

/* _HPX PCI Express Setting Record (Type 2) */
struct hpx_type2 {
	u32 revision;
	u32 unc_err_mask_and;
	u32 unc_err_mask_or;
	u32 unc_err_sever_and;
	u32 unc_err_sever_or;
	u32 cor_err_mask_and;
	u32 cor_err_mask_or;
	u32 adv_err_cap_and;
	u32 adv_err_cap_or;
	u16 pci_exp_devctl_and;
	u16 pci_exp_devctl_or;
	u16 pci_exp_lnkctl_and;
	u16 pci_exp_lnkctl_or;
	u32 sec_unc_err_sever_and;
	u32 sec_unc_err_sever_or;
	u32 sec_unc_err_mask_and;
	u32 sec_unc_err_mask_or;
};

static void program_hpx_type2(struct pci_dev *dev, struct hpx_type2 *hpx)
{
	int pos;
	u32 reg32;

	if (!hpx)
		return;

	if (!pci_is_pcie(dev))
		return;

	if (hpx->revision > 1) {
		pci_warn(dev, "PCIe settings rev %d not supported\n",
			 hpx->revision);
		return;
	}

	/*
	 * Don't allow _HPX to change MPS or MRRS settings. We manage
	 * those to make sure they're consistent with the rest of the
	 * platform.
	 */
	hpx->pci_exp_devctl_and |= PCI_EXP_DEVCTL_PAYLOAD |
				   PCI_EXP_DEVCTL_READRQ;
	hpx->pci_exp_devctl_or &= ~(PCI_EXP_DEVCTL_PAYLOAD |
				    PCI_EXP_DEVCTL_READRQ);

	/* Initialize Device Control Register */
	pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
			~hpx->pci_exp_devctl_and, hpx->pci_exp_devctl_or);

	/* Initialize Link Control Register */
	if (pcie_cap_has_lnkctl(dev)) {

		/*
		 * If the Root Port supports Read Completion Boundary of
		 * 128, set RCB to 128. Otherwise, clear it.
		 */
		hpx->pci_exp_lnkctl_and |= PCI_EXP_LNKCTL_RCB;
		hpx->pci_exp_lnkctl_or &= ~PCI_EXP_LNKCTL_RCB;
		if (pcie_root_rcb_set(dev))
			hpx->pci_exp_lnkctl_or |= PCI_EXP_LNKCTL_RCB;

		pcie_capability_clear_and_set_word(dev, PCI_EXP_LNKCTL,
			~hpx->pci_exp_lnkctl_and, hpx->pci_exp_lnkctl_or);
	}

	/* Find Advanced Error Reporting Enhanced Capability */
	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
	if (!pos)
		return;

	/* Initialize Uncorrectable Error Mask Register */
	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, &reg32);
	reg32 = (reg32 & hpx->unc_err_mask_and) | hpx->unc_err_mask_or;
	pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, reg32);

	/* Initialize Uncorrectable Error Severity Register */
	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &reg32);
	reg32 = (reg32 & hpx->unc_err_sever_and) | hpx->unc_err_sever_or;
	pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, reg32);

	/* Initialize Correctable Error Mask Register */
	pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, &reg32);
	reg32 = (reg32 & hpx->cor_err_mask_and) | hpx->cor_err_mask_or;
	pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, reg32);

	/* Initialize Advanced Error Capabilities and Control Register */
	pci_read_config_dword(dev, pos + PCI_ERR_CAP, &reg32);
	reg32 = (reg32 & hpx->adv_err_cap_and) | hpx->adv_err_cap_or;

	/* Don't enable ECRC generation or checking if unsupported */
	if (!(reg32 & PCI_ERR_CAP_ECRC_GENC))
		reg32 &= ~PCI_ERR_CAP_ECRC_GENE;
	if (!(reg32 & PCI_ERR_CAP_ECRC_CHKC))
		reg32 &= ~PCI_ERR_CAP_ECRC_CHKE;
	pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32);

	/*
	 * FIXME: The following two registers are not supported yet.
	 *
	 *   o Secondary Uncorrectable Error Severity Register
	 *   o Secondary Uncorrectable Error Mask Register
	 */
}

static acpi_status decode_type2_hpx_record(union acpi_object *record,
					   struct hpx_type2 *hpx2)
{
	int i;
	union acpi_object *fields = record->package.elements;
	u32 revision = fields[1].integer.value;

	switch (revision) {
	case 1:
		if (record->package.count != 18)
			return AE_ERROR;
		for (i = 2; i < 18; i++)
			if (fields[i].type != ACPI_TYPE_INTEGER)
				return AE_ERROR;
		hpx2->revision = revision;
		hpx2->unc_err_mask_and = fields[2].integer.value;
		hpx2->unc_err_mask_or = fields[3].integer.value;
		hpx2->unc_err_sever_and = fields[4].integer.value;
		hpx2->unc_err_sever_or = fields[5].integer.value;
		hpx2->cor_err_mask_and = fields[6].integer.value;
		hpx2->cor_err_mask_or = fields[7].integer.value;
		hpx2->adv_err_cap_and = fields[8].integer.value;
		hpx2->adv_err_cap_or = fields[9].integer.value;
		hpx2->pci_exp_devctl_and = fields[10].integer.value;
		hpx2->pci_exp_devctl_or = fields[11].integer.value;
		hpx2->pci_exp_lnkctl_and = fields[12].integer.value;
		hpx2->pci_exp_lnkctl_or = fields[13].integer.value;
		hpx2->sec_unc_err_sever_and = fields[14].integer.value;
		hpx2->sec_unc_err_sever_or = fields[15].integer.value;
		hpx2->sec_unc_err_mask_and = fields[16].integer.value;
		hpx2->sec_unc_err_mask_or = fields[17].integer.value;
		break;
	default:
		pr_warn("%s: Type 2 Revision %d record not supported\n",
			__func__, revision);
		return AE_ERROR;
	}
	return AE_OK;
}

/* _HPX PCI Express Setting Record (Type 3) */
struct hpx_type3 {
	u16 device_type;
	u16 function_type;
	u16 config_space_location;
	u16 pci_exp_cap_id;
	u16 pci_exp_cap_ver;
	u16 pci_exp_vendor_id;
	u16 dvsec_id;
	u16 dvsec_rev;
	u16 match_offset;
	u32 match_mask_and;
	u32 match_value;
	u16 reg_offset;
	u32 reg_mask_and;
	u32 reg_mask_or;
};

enum hpx_type3_dev_type {
	HPX_TYPE_ENDPOINT	= BIT(0),
	HPX_TYPE_LEG_END	= BIT(1),
	HPX_TYPE_RC_END		= BIT(2),
	HPX_TYPE_RC_EC		= BIT(3),
	HPX_TYPE_ROOT_PORT	= BIT(4),
	HPX_TYPE_UPSTREAM	= BIT(5),
	HPX_TYPE_DOWNSTREAM	= BIT(6),
	HPX_TYPE_PCI_BRIDGE	= BIT(7),
	HPX_TYPE_PCIE_BRIDGE	= BIT(8),
};

static u16 hpx3_device_type(struct pci_dev *dev)
{
	u16 pcie_type = pci_pcie_type(dev);
	static const int pcie_to_hpx3_type[] = {
		[PCI_EXP_TYPE_ENDPOINT]    = HPX_TYPE_ENDPOINT,
		[PCI_EXP_TYPE_LEG_END]     = HPX_TYPE_LEG_END,
		[PCI_EXP_TYPE_RC_END]      = HPX_TYPE_RC_END,
		[PCI_EXP_TYPE_RC_EC]       = HPX_TYPE_RC_EC,
		[PCI_EXP_TYPE_ROOT_PORT]   = HPX_TYPE_ROOT_PORT,
		[PCI_EXP_TYPE_UPSTREAM]    = HPX_TYPE_UPSTREAM,
		[PCI_EXP_TYPE_DOWNSTREAM]  = HPX_TYPE_DOWNSTREAM,
		[PCI_EXP_TYPE_PCI_BRIDGE]  = HPX_TYPE_PCI_BRIDGE,
		[PCI_EXP_TYPE_PCIE_BRIDGE] = HPX_TYPE_PCIE_BRIDGE,
	};

	if (pcie_type >= ARRAY_SIZE(pcie_to_hpx3_type))
		return 0;

	return pcie_to_hpx3_type[pcie_type];
}

enum hpx_type3_fn_type {
	HPX_FN_NORMAL		= BIT(0),
	HPX_FN_SRIOV_PHYS	= BIT(1),
	HPX_FN_SRIOV_VIRT	= BIT(2),
};

static u8 hpx3_function_type(struct pci_dev *dev)
{
	if (dev->is_virtfn)
		return HPX_FN_SRIOV_VIRT;
	else if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV) > 0)
		return HPX_FN_SRIOV_PHYS;
	else
		return HPX_FN_NORMAL;
}

static bool hpx3_cap_ver_matches(u8 pcie_cap_id, u8 hpx3_cap_id)
{
	u8 cap_ver = hpx3_cap_id & 0xf;

	if ((hpx3_cap_id & BIT(4)) && cap_ver >= pcie_cap_id)
		return true;
	else if (cap_ver == pcie_cap_id)
		return true;

	return false;
}

enum hpx_type3_cfg_loc {
	HPX_CFG_PCICFG		= 0,
	HPX_CFG_PCIE_CAP	= 1,
	HPX_CFG_PCIE_CAP_EXT	= 2,
	HPX_CFG_VEND_CAP	= 3,
	HPX_CFG_DVSEC		= 4,
	HPX_CFG_MAX,
};

static void program_hpx_type3_register(struct pci_dev *dev,
				       const struct hpx_type3 *reg)
{
	u32 match_reg, write_reg, header, orig_value;
	u16 pos;

	if (!(hpx3_device_type(dev) & reg->device_type))
		return;

	if (!(hpx3_function_type(dev) & reg->function_type))
		return;

	switch (reg->config_space_location) {
	case HPX_CFG_PCICFG:
		pos = 0;
		break;
	case HPX_CFG_PCIE_CAP:
		pos = pci_find_capability(dev, reg->pci_exp_cap_id);
		if (pos == 0)
			return;

		break;
	case HPX_CFG_PCIE_CAP_EXT:
		pos = pci_find_ext_capability(dev, reg->pci_exp_cap_id);
		if (pos == 0)
			return;

		pci_read_config_dword(dev, pos, &header);
		if (!hpx3_cap_ver_matches(PCI_EXT_CAP_VER(header),
					  reg->pci_exp_cap_ver))
			return;

		break;
	case HPX_CFG_VEND_CAP:
	case HPX_CFG_DVSEC:
	default:
		pci_warn(dev, "Encountered _HPX type 3 with unsupported config space location");
		return;
	}

	pci_read_config_dword(dev, pos + reg->match_offset, &match_reg);

	if ((match_reg & reg->match_mask_and) != reg->match_value)
		return;

	pci_read_config_dword(dev, pos + reg->reg_offset, &write_reg);
	orig_value = write_reg;
	write_reg &= reg->reg_mask_and;
	write_reg |= reg->reg_mask_or;

	if (orig_value == write_reg)
		return;

	pci_write_config_dword(dev, pos + reg->reg_offset, write_reg);

	pci_dbg(dev, "Applied _HPX3 at [0x%x]: 0x%08x -> 0x%08x",
		pos, orig_value, write_reg);
}

static void program_hpx_type3(struct pci_dev *dev, struct hpx_type3 *hpx)
{
	if (!hpx)
		return;

	if (!pci_is_pcie(dev))
		return;

	program_hpx_type3_register(dev, hpx);
}

static void parse_hpx3_register(struct hpx_type3 *hpx3_reg,
				union acpi_object *reg_fields)
{
	hpx3_reg->device_type = reg_fields[0].integer.value;
	hpx3_reg->function_type = reg_fields[1].integer.value;
	hpx3_reg->config_space_location = reg_fields[2].integer.value;
	hpx3_reg->pci_exp_cap_id = reg_fields[3].integer.value;
	hpx3_reg->pci_exp_cap_ver = reg_fields[4].integer.value;
	hpx3_reg->pci_exp_vendor_id = reg_fields[5].integer.value;
	hpx3_reg->dvsec_id = reg_fields[6].integer.value;
	hpx3_reg->dvsec_rev = reg_fields[7].integer.value;
	hpx3_reg->match_offset = reg_fields[8].integer.value;
	hpx3_reg->match_mask_and = reg_fields[9].integer.value;
	hpx3_reg->match_value = reg_fields[10].integer.value;
	hpx3_reg->reg_offset = reg_fields[11].integer.value;
	hpx3_reg->reg_mask_and = reg_fields[12].integer.value;
	hpx3_reg->reg_mask_or = reg_fields[13].integer.value;
}

static acpi_status program_type3_hpx_record(struct pci_dev *dev,
					    union acpi_object *record)
{
	union acpi_object *fields = record->package.elements;
	u32 desc_count, expected_length, revision;
	union acpi_object *reg_fields;
	struct hpx_type3 hpx3;
	int i;

	revision = fields[1].integer.value;
	switch (revision) {
	case 1:
		desc_count = fields[2].integer.value;
		expected_length = 3 + desc_count * 14;

		if (record->package.count != expected_length)
			return AE_ERROR;

		for (i = 2; i < expected_length; i++)
			if (fields[i].type != ACPI_TYPE_INTEGER)
				return AE_ERROR;

		for (i = 0; i < desc_count; i++) {
			reg_fields = fields + 3 + i * 14;
			parse_hpx3_register(&hpx3, reg_fields);
			program_hpx_type3(dev, &hpx3);
		}

		break;
	default:
		printk(KERN_WARNING
			"%s: Type 3 Revision %d record not supported\n",
			__func__, revision);
		return AE_ERROR;
	}
	return AE_OK;
}

static acpi_status acpi_run_hpx(struct pci_dev *dev, acpi_handle handle)
{
	acpi_status status;
	struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
	union acpi_object *package, *record, *fields;
	struct hpx_type0 hpx0;
	struct hpx_type1 hpx1;
	struct hpx_type2 hpx2;
	u32 type;
	int i;

	status = acpi_evaluate_object(handle, "_HPX", NULL, &buffer);
	if (ACPI_FAILURE(status))
		return status;

	package = (union acpi_object *)buffer.pointer;
	if (package->type != ACPI_TYPE_PACKAGE) {
		status = AE_ERROR;
		goto exit;
	}

	for (i = 0; i < package->package.count; i++) {
		record = &package->package.elements[i];
		if (record->type != ACPI_TYPE_PACKAGE) {
			status = AE_ERROR;
			goto exit;
		}

		fields = record->package.elements;
		if (fields[0].type != ACPI_TYPE_INTEGER ||
		    fields[1].type != ACPI_TYPE_INTEGER) {
			status = AE_ERROR;
			goto exit;
		}

		type = fields[0].integer.value;
		switch (type) {
		case 0:
			memset(&hpx0, 0, sizeof(hpx0));
			status = decode_type0_hpx_record(record, &hpx0);
			if (ACPI_FAILURE(status))
				goto exit;
			program_hpx_type0(dev, &hpx0);
			break;
		case 1:
			memset(&hpx1, 0, sizeof(hpx1));
			status = decode_type1_hpx_record(record, &hpx1);
			if (ACPI_FAILURE(status))
				goto exit;
			program_hpx_type1(dev, &hpx1);
			break;
		case 2:
			memset(&hpx2, 0, sizeof(hpx2));
			status = decode_type2_hpx_record(record, &hpx2);
			if (ACPI_FAILURE(status))
				goto exit;
			program_hpx_type2(dev, &hpx2);
			break;
		case 3:
			status = program_type3_hpx_record(dev, record);
			if (ACPI_FAILURE(status))
				goto exit;
			break;
		default:
			pr_err("%s: Type %d record not supported\n",
			       __func__, type);
			status = AE_ERROR;
			goto exit;
		}
	}
exit:
	kfree(buffer.pointer);
	return status;
}

static acpi_status acpi_run_hpp(struct pci_dev *dev, acpi_handle handle)
{
	acpi_status status;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *package, *fields;
	struct hpx_type0 hpx0;
	int i;

	memset(&hpx0, 0, sizeof(hpx0));

	status = acpi_evaluate_object(handle, "_HPP", NULL, &buffer);
	if (ACPI_FAILURE(status))
		return status;

	package = (union acpi_object *) buffer.pointer;
	if (package->type != ACPI_TYPE_PACKAGE ||
	    package->package.count != 4) {
		status = AE_ERROR;
		goto exit;
	}

	fields = package->package.elements;
	for (i = 0; i < 4; i++) {
		if (fields[i].type != ACPI_TYPE_INTEGER) {
			status = AE_ERROR;
			goto exit;
		}
	}

	hpx0.revision = 1;
	hpx0.cache_line_size = fields[0].integer.value;
	hpx0.latency_timer = fields[1].integer.value;
	hpx0.enable_serr = fields[2].integer.value;
	hpx0.enable_perr = fields[3].integer.value;

	program_hpx_type0(dev, &hpx0);

exit:
	kfree(buffer.pointer);
	return status;
}
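
/*
 * For illustration only (not part of this file): a firmware _HPX object
 * handled by acpi_run_hpx() above might return a single Type 0 record,
 * which carries the same fields as the legacy _HPP object parsed by
 * acpi_run_hpp(). A hypothetical ASL sketch, assuming the Type 0 layout
 * decoded above ({type, revision, cache line size, latency timer,
 * enable SERR, enable PERR}):
 *
 *	Method (_HPX, 0) {
 *		Return (Package () {
 *			Package () { 0x00, 0x01, 0x08, 0x40, One, One }
 *		})
 *	}
 */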

/* pci_acpi_program_hp_params
 *
 * @dev - the pci_dev for which we want parameters
 */
int pci_acpi_program_hp_params(struct pci_dev *dev)
{
	acpi_status status;
	acpi_handle handle, phandle;
	struct pci_bus *pbus;

	if (acpi_pci_disabled)
		return -ENODEV;

	handle = NULL;
	for (pbus = dev->bus; pbus; pbus = pbus->parent) {
		handle = acpi_pci_get_bridge_handle(pbus);
		if (handle)
			break;
	}

	/*
	 * _HPP settings apply to all child buses, until another _HPP is
	 * encountered. If we don't find an _HPP for the input pci dev,
	 * look for it in the parent device scope since that would apply to
	 * this pci dev.
	 */
	while (handle) {
		status = acpi_run_hpx(dev, handle);
		if (ACPI_SUCCESS(status))
			return 0;
		status = acpi_run_hpp(dev, handle);
		if (ACPI_SUCCESS(status))
			return 0;
		if (acpi_is_root_bridge(handle))
			break;
		status = acpi_get_parent(handle, &phandle);
		if (ACPI_FAILURE(status))
			break;
		handle = phandle;
	}
	return -ENODEV;
}

/**
 * pciehp_is_native - Check whether a hotplug port is handled by the OS
 * @bridge: Hotplug port to check
 *
 * Returns true if the given @bridge is handled by the native PCIe hotplug
 * driver.
 */
bool pciehp_is_native(struct pci_dev *bridge)
{
	const struct pci_host_bridge *host;

	if (!IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE))
		return false;

	if (pcie_ports_native)
		return true;

	host = pci_find_host_bridge(bridge->bus);
	return host->native_pcie_hotplug;
}

/**
 * shpchp_is_native - Check whether a hotplug port is handled by the OS
 * @bridge: Hotplug port to check
 *
 * Returns true if the given @bridge is handled by the native SHPC hotplug
 * driver.
 */
bool shpchp_is_native(struct pci_dev *bridge)
{
	return bridge->shpc_managed;
}

/**
 * pci_acpi_wake_bus - Root bus wakeup notification work function.
 * @context: Device wakeup context.
 */
static void pci_acpi_wake_bus(struct acpi_device_wakeup_context *context)
{
	pci_pme_wakeup_bus(to_pci_host_bridge(context->dev)->bus);
}

/**
 * pci_acpi_wake_dev - PCI device wakeup notification work function.
 * @context: Device wakeup context.
 */
static void pci_acpi_wake_dev(struct acpi_device_wakeup_context *context)
{
	struct pci_dev *pci_dev;

	pci_dev = to_pci_dev(context->dev);

	if (pci_dev->pme_poll)
		pci_dev->pme_poll = false;

	if (pci_dev->current_state == PCI_D3cold) {
		pci_wakeup_event(pci_dev);
		pm_request_resume(&pci_dev->dev);
		return;
	}

	/* Clear PME Status if set. */
	if (pci_dev->pme_support)
		pci_check_pme_status(pci_dev);

	pci_wakeup_event(pci_dev);
	pm_request_resume(&pci_dev->dev);

	pci_pme_wakeup_bus(pci_dev->subordinate);
}

/**
 * pci_acpi_add_root_pm_notifier - Register PM notifier for root PCI bus.
 * @dev: PCI root bridge ACPI device.
 * @root: PCI root corresponding to @dev.
 */
acpi_status pci_acpi_add_root_pm_notifier(struct acpi_device *dev,
					  struct acpi_pci_root *root)
{
	return acpi_add_pm_notifier(dev, root->bus->bridge, pci_acpi_wake_bus);
}

/**
 * pci_acpi_add_pm_notifier - Register PM notifier for given PCI device.
 * @dev: ACPI device to add the notifier for.
 * @pci_dev: PCI device to check for the PME status if an event is signaled.
 */
acpi_status pci_acpi_add_pm_notifier(struct acpi_device *dev,
				     struct pci_dev *pci_dev)
{
	return acpi_add_pm_notifier(dev, &pci_dev->dev, pci_acpi_wake_dev);
}

/*
 * _SxD returns the D-state with the highest power
 * (lowest D-state number) supported in the S-state "x".
 *
 * If the device does not have a _PRW
 * (Power Resources for Wake) supporting system wakeup from "x"
 * then the OS is free to choose a lower power (higher number
 * D-state) than the return value from _SxD.
 *
 * But if _PRW is enabled at S-state "x", the OS
 * must not choose a power lower than _SxD --
 * unless the device has an _SxW method specifying
 * the lowest power (highest D-state number) the device
 * may enter while still able to wake the system.
 *
 * i.e. depending on global OS policy:
 *
 * if (_PRW at S-state x)
 *	choose from highest power _SxD to lowest power _SxW
 * else // no _PRW at S-state x
 *	choose highest power _SxD or any lower power
 */

pci_power_t acpi_pci_choose_state(struct pci_dev *pdev)
{
	int acpi_state, d_max;

	if (pdev->no_d3cold || !pdev->d3cold_allowed)
		d_max = ACPI_STATE_D3_HOT;
	else
		d_max = ACPI_STATE_D3_COLD;
	acpi_state = acpi_pm_device_sleep_state(&pdev->dev, NULL, d_max);
	if (acpi_state < 0)
		return PCI_POWER_ERROR;

	switch (acpi_state) {
	case ACPI_STATE_D0:
		return PCI_D0;
	case ACPI_STATE_D1:
		return PCI_D1;
	case ACPI_STATE_D2:
		return PCI_D2;
	case ACPI_STATE_D3_HOT:
		return PCI_D3hot;
	case ACPI_STATE_D3_COLD:
		return PCI_D3cold;
	}
	return PCI_POWER_ERROR;
}

static struct acpi_device *acpi_pci_find_companion(struct device *dev);

void pci_set_acpi_fwnode(struct pci_dev *dev)
{
	if (!dev_fwnode(&dev->dev) && !pci_dev_is_added(dev))
		ACPI_COMPANION_SET(&dev->dev,
				   acpi_pci_find_companion(&dev->dev));
}

/**
 * pci_dev_acpi_reset - do a function level reset using _RST method
 * @dev: device to reset
 * @probe: if true, return 0 if device supports _RST
 */
int pci_dev_acpi_reset(struct pci_dev *dev, bool probe)
{
	acpi_handle handle = ACPI_HANDLE(&dev->dev);

	if (!handle || !acpi_has_method(handle, "_RST"))
		return -ENOTTY;

	if (probe)
		return 0;

	if (ACPI_FAILURE(acpi_evaluate_object(handle, "_RST", NULL, NULL))) {
		pci_warn(dev, "ACPI _RST failed\n");
		return -ENOTTY;
	}

	return 0;
}

bool acpi_pci_power_manageable(struct pci_dev *dev)
{
	struct acpi_device *adev = ACPI_COMPANION(&dev->dev);

	return adev && acpi_device_power_manageable(adev);
}

bool acpi_pci_bridge_d3(struct pci_dev *dev)
{
	struct pci_dev *rpdev;
	struct acpi_device *adev, *rpadev;
	const union acpi_object *obj;

	if (acpi_pci_disabled || !dev->is_pciehp)
		return false;

	adev = ACPI_COMPANION(&dev->dev);
	if (adev) {
		/*
		 * If the bridge has _S0W, whether or not it can go into D3
		 * depends on what is returned by that object. In particular,
		 * if the power state returned by _S0W is D2 or shallower,
		 * entering D3 should not be allowed.
		 */
		if (acpi_dev_power_state_for_wake(adev) <= ACPI_STATE_D2)
			return false;

		/*
		 * Otherwise, assume that the bridge can enter D3 so long as it
		 * is power-manageable via ACPI.
		 */
		if (acpi_device_power_manageable(adev))
			return true;
	}

	rpdev = pcie_find_root_port(dev);
	if (!rpdev)
		return false;

	if (rpdev == dev)
		rpadev = adev;
	else
		rpadev = ACPI_COMPANION(&rpdev->dev);

	if (!rpadev)
		return false;

	/*
	 * If the Root Port cannot signal wakeup signals at all, i.e., it
	 * doesn't supply a wakeup GPE via _PRW, it cannot signal hotplug
	 * events from low-power states including D3hot and D3cold.
	 */
	if (!rpadev->wakeup.flags.valid)
		return false;

	/*
	 * In the bridge-below-a-Root-Port case, evaluate _S0W for the Root Port
	 * to verify whether or not it can signal wakeup from D3.
	 */
	if (rpadev != adev &&
	    acpi_dev_power_state_for_wake(rpadev) <= ACPI_STATE_D2)
		return false;

	/*
	 * The "HotPlugSupportInD3" property in a Root Port _DSD indicates
	 * the Port can signal hotplug events while in D3. We assume any
	 * bridges *below* that Root Port can also signal hotplug events
	 * while in D3.
	 */
	if (!acpi_dev_get_property(rpadev, "HotPlugSupportInD3",
				   ACPI_TYPE_INTEGER, &obj) &&
	    obj->integer.value == 1)
		return true;

	return false;
}

static void acpi_pci_config_space_access(struct pci_dev *dev, bool enable)
{
	int val = enable ? ACPI_REG_CONNECT : ACPI_REG_DISCONNECT;
	int ret = acpi_evaluate_reg(ACPI_HANDLE(&dev->dev),
				    ACPI_ADR_SPACE_PCI_CONFIG, val);
	if (ret)
		pci_dbg(dev, "ACPI _REG %s evaluation failed (%d)\n",
			enable ? "connect" : "disconnect", ret);
}

int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
	static const u8 state_conv[] = {
		[PCI_D0] = ACPI_STATE_D0,
		[PCI_D1] = ACPI_STATE_D1,
		[PCI_D2] = ACPI_STATE_D2,
		[PCI_D3hot] = ACPI_STATE_D3_HOT,
		[PCI_D3cold] = ACPI_STATE_D3_COLD,
	};
	int error;

	/* If the ACPI device has _EJ0, ignore the device */
	if (!adev || acpi_has_method(adev->handle, "_EJ0"))
		return -ENODEV;

	switch (state) {
	case PCI_D0:
	case PCI_D1:
	case PCI_D2:
	case PCI_D3hot:
	case PCI_D3cold:
		break;
	default:
		return -EINVAL;
	}

	if (state == PCI_D3cold) {
		if (dev_pm_qos_flags(&dev->dev, PM_QOS_FLAG_NO_POWER_OFF) ==
				PM_QOS_FLAGS_ALL)
			return -EBUSY;

		/* Notify AML lack of PCI config space availability */
		acpi_pci_config_space_access(dev, false);
	}

	error = acpi_device_set_power(adev, state_conv[state]);
	if (error)
		return error;

	pci_dbg(dev, "power state changed by ACPI to %s\n",
		acpi_power_state_string(adev->power.state));

	/*
	 * Notify AML of PCI config space availability. Config space is
	 * accessible in all states except D3cold; the only transitions
	 * that change availability are transitions to D3cold and from
	 * D3cold to D0.
	 */
	if (state == PCI_D0)
		acpi_pci_config_space_access(dev, true);

	return 0;
}

pci_power_t acpi_pci_get_power_state(struct pci_dev *dev)
{
	struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
	static const pci_power_t state_conv[] = {
		[ACPI_STATE_D0] = PCI_D0,
		[ACPI_STATE_D1] = PCI_D1,
		[ACPI_STATE_D2] = PCI_D2,
		[ACPI_STATE_D3_HOT] = PCI_D3hot,
		[ACPI_STATE_D3_COLD] = PCI_D3cold,
	};
	int state;

	if (!adev || !acpi_device_power_manageable(adev))
		return PCI_UNKNOWN;

	state = adev->power.state;
	if (state == ACPI_STATE_UNKNOWN)
		return PCI_UNKNOWN;

	return state_conv[state];
}

void acpi_pci_refresh_power_state(struct pci_dev *dev)
{
	struct acpi_device *adev = ACPI_COMPANION(&dev->dev);

	if (adev && acpi_device_power_manageable(adev))
		acpi_device_update_power(adev, NULL);
}

static int acpi_pci_propagate_wakeup(struct pci_bus *bus, bool enable)
{
	while (bus->parent) {
		if (acpi_pm_device_can_wakeup(&bus->self->dev))
			return acpi_pm_set_device_wakeup(&bus->self->dev, enable);

		bus = bus->parent;
	}

	/* We have reached the root bus. */
	if (bus->bridge) {
		if (acpi_pm_device_can_wakeup(bus->bridge))
			return acpi_pm_set_device_wakeup(bus->bridge, enable);
	}
	return 0;
}

int acpi_pci_wakeup(struct pci_dev *dev, bool enable)
{
	if (acpi_pci_disabled)
		return 0;

	if (acpi_pm_device_can_wakeup(&dev->dev))
		return acpi_pm_set_device_wakeup(&dev->dev, enable);

	return acpi_pci_propagate_wakeup(dev->bus, enable);
}

bool acpi_pci_need_resume(struct pci_dev *dev)
{
	struct acpi_device *adev;

	if (acpi_pci_disabled)
		return false;

	/*
	 * In some cases (e.g. Samsung 305V4A) leaving a bridge in suspend over
	 * system-wide suspend/resume confuses the platform firmware, so avoid
	 * doing that. According to Section 16.1.6 of ACPI 6.2, endpoint
	 * devices are expected to be in D3 before invoking the S3 entry path
	 * from the firmware, so they should not be affected by this issue.
	 */
	if (pci_is_bridge(dev) && acpi_target_system_state() != ACPI_STATE_S0)
		return true;

	adev = ACPI_COMPANION(&dev->dev);
	if (!adev || !acpi_device_power_manageable(adev))
		return false;

	if (adev->wakeup.flags.valid &&
	    device_may_wakeup(&dev->dev) != !!adev->wakeup.prepare_count)
		return true;

	if (acpi_target_system_state() == ACPI_STATE_S0)
		return false;

	return !!adev->power.flags.dsw_present;
}

void acpi_pci_add_bus(struct pci_bus *bus)
{
	union acpi_object *obj;
	struct pci_host_bridge *bridge;

	if (acpi_pci_disabled || !bus->bridge || !ACPI_HANDLE(bus->bridge))
		return;

	acpi_pci_slot_enumerate(bus);
	acpiphp_enumerate_slots(bus);

	/*
	 * For a host bridge, check its _DSM for function 8 and if
	 * that is available, mark it in pci_host_bridge.
	 */
	if (!pci_is_root_bus(bus))
		return;

	obj = acpi_evaluate_dsm_typed(ACPI_HANDLE(bus->bridge), &pci_acpi_dsm_guid, 3,
				      DSM_PCI_POWER_ON_RESET_DELAY, NULL, ACPI_TYPE_INTEGER);
	if (!obj)
		return;

	if (obj->integer.value == 1) {
		bridge = pci_find_host_bridge(bus);
		bridge->ignore_reset_delay = 1;
	}
	ACPI_FREE(obj);
}

void acpi_pci_remove_bus(struct pci_bus *bus)
{
	if (acpi_pci_disabled || !bus->bridge)
		return;

	acpiphp_remove_slots(bus);
	acpi_pci_slot_remove(bus);
}

/* ACPI bus type */


static DECLARE_RWSEM(pci_acpi_companion_lookup_sem);
static struct acpi_device *(*pci_acpi_find_companion_hook)(struct pci_dev *);

/**
 * pci_acpi_set_companion_lookup_hook - Set ACPI companion lookup callback.
 * @func: ACPI companion lookup callback pointer or NULL.
 *
 * Set a special ACPI companion lookup callback for PCI devices whose companion
 * objects in the ACPI namespace have _ADR with non-standard bus-device-function
 * encodings.
 *
 * Return 0 on success or a negative error code on failure (in which case no
 * changes are made).
 *
 * The caller is responsible for the appropriate ordering of the invocations of
 * this function with respect to the enumeration of the PCI devices needing the
 * callback installed by it.
 */
int pci_acpi_set_companion_lookup_hook(struct acpi_device *(*func)(struct pci_dev *))
{
	int ret;

	if (!func)
		return -EINVAL;

	down_write(&pci_acpi_companion_lookup_sem);

	if (pci_acpi_find_companion_hook) {
		ret = -EBUSY;
	} else {
		pci_acpi_find_companion_hook = func;
		ret = 0;
	}

	up_write(&pci_acpi_companion_lookup_sem);

	return ret;
}
EXPORT_SYMBOL_GPL(pci_acpi_set_companion_lookup_hook);

/**
 * pci_acpi_clear_companion_lookup_hook - Clear ACPI companion lookup callback.
 *
 * Clear the special ACPI companion lookup callback previously set by
 * pci_acpi_set_companion_lookup_hook(). Block until the last running instance
 * of the callback returns before clearing it.
 *
 * The caller is responsible for the appropriate ordering of the invocations of
 * this function with respect to the enumeration of the PCI devices needing the
 * callback cleared by it.
 */
void pci_acpi_clear_companion_lookup_hook(void)
{
	down_write(&pci_acpi_companion_lookup_sem);

	pci_acpi_find_companion_hook = NULL;

	up_write(&pci_acpi_companion_lookup_sem);
}
EXPORT_SYMBOL_GPL(pci_acpi_clear_companion_lookup_hook);
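
/*
 * Illustrative sketch (not part of this file): a host bridge driver that
 * enumerates devices with non-standard _ADR encodings could install a
 * lookup hook around its enumeration, e.g.:
 *
 *	// foo_find_companion() is a hypothetical callback for illustration
 *	static struct acpi_device *foo_find_companion(struct pci_dev *pdev)
 *	{
 *		return NULL;	// map pdev to its ACPI companion here
 *	}
 *
 *	pci_acpi_set_companion_lookup_hook(foo_find_companion);
 *	// ... enumerate the PCI devices that need the hook ...
 *	pci_acpi_clear_companion_lookup_hook();
 */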

static struct acpi_device *acpi_pci_find_companion(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct acpi_device *adev;
	bool check_children;
	u64 addr;

	if (!dev->parent)
		return NULL;

	down_read(&pci_acpi_companion_lookup_sem);

	adev = pci_acpi_find_companion_hook ?
		pci_acpi_find_companion_hook(pci_dev) : NULL;

	up_read(&pci_acpi_companion_lookup_sem);

	if (adev)
		return adev;

	check_children = pci_is_bridge(pci_dev);
	/* Refer to the ACPI spec for the syntax of _ADR */
	addr = (PCI_SLOT(pci_dev->devfn) << 16) | PCI_FUNC(pci_dev->devfn);
	adev = acpi_find_child_device(ACPI_COMPANION(dev->parent), addr,
				      check_children);

	/*
	 * There may be ACPI device objects in the ACPI namespace that are
	 * children of the device object representing the host bridge, but don't
	 * represent PCI devices. Both _HID and _ADR may be present for them,
	 * even though that is against the specification (for example, see
	 * Section 6.1 of ACPI 6.3), but in many cases the _ADR returns 0 which
	 * appears to indicate that they should not be taken into consideration
	 * as potential companions of PCI devices on the root bus.
	 *
	 * To catch this special case, disregard the returned device object if
	 * it has a valid _HID, addr is 0 and the PCI device at hand is on the
	 * root bus.
	 */
	if (adev && adev->pnp.type.platform_id && !addr &&
	    pci_is_root_bus(pci_dev->bus))
		return NULL;

	return adev;
}

/**
 * pci_acpi_optimize_delay - optimize PCI D3 and D3cold delay from ACPI
 * @pdev: the PCI device whose delay is to be updated
 * @handle: ACPI handle of this device
 *
 * Update the d3hot_delay and d3cold_delay of a PCI device from the ACPI _DSM
 * control method of either the device itself or the PCI host bridge.
 *
 * Function 8, "Reset Delay," applies to the entire hierarchy below a PCI
 * host bridge. If it returns one, the OS may assume that all devices in
 * the hierarchy have already completed power-on reset delays.
 *
 * Function 9, "Device Readiness Durations," applies only to the object
 * where it is located. It returns delay durations required after various
 * events if the device requires less time than the spec requires. Delays
 * from this function take precedence over the Reset Delay function.
 *
 * These _DSM functions are defined by the draft ECN of January 28, 2014,
 * titled "ACPI additions for FW latency optimizations."
 */
static void pci_acpi_optimize_delay(struct pci_dev *pdev,
				    acpi_handle handle)
{
	struct pci_host_bridge *bridge = pci_find_host_bridge(pdev->bus);
	int value;
	union acpi_object *obj, *elements;

	if (bridge->ignore_reset_delay)
		pdev->d3cold_delay = 0;

	obj = acpi_evaluate_dsm_typed(handle, &pci_acpi_dsm_guid, 3,
				      DSM_PCI_DEVICE_READINESS_DURATIONS, NULL,
				      ACPI_TYPE_PACKAGE);
	if (!obj)
		return;

	if (obj->package.count == 5) {
		elements = obj->package.elements;
		if (elements[0].type == ACPI_TYPE_INTEGER) {
			value = (int)elements[0].integer.value / 1000;
			if (value < PCI_PM_D3COLD_WAIT)
				pdev->d3cold_delay = value;
		}
		if (elements[3].type == ACPI_TYPE_INTEGER) {
			value = (int)elements[3].integer.value / 1000;
			if (value < PCI_PM_D3HOT_WAIT)
				pdev->d3hot_delay = value;
		}
	}
	ACPI_FREE(obj);
}

static void pci_acpi_set_external_facing(struct pci_dev *dev)
{
	u8 val;

	if (pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
		return;
	if (device_property_read_u8(&dev->dev, "ExternalFacingPort", &val))
		return;

	/*
	 * These root ports expose PCIe (including DMA) outside of the
	 * system. Everything downstream from them is external.
	 */
	if (val)
		dev->external_facing = 1;
}

void pci_acpi_setup(struct device *dev, struct acpi_device *adev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);

	pci_acpi_optimize_delay(pci_dev, adev->handle);
	pci_acpi_set_external_facing(pci_dev);
	pci_acpi_add_edr_notifier(pci_dev);

	pci_acpi_add_pm_notifier(adev, pci_dev);
	if (!adev->wakeup.flags.valid)
		return;

	device_set_wakeup_capable(dev, true);
	/*
	 * For bridges that can do D3 we enable wake automatically (as
	 * we do for the power management itself in that case). The
	 * reason is that the bridge may have additional methods such as
	 * _DSW that need to be called.
	 */
	if (pci_dev->bridge_d3)
		device_wakeup_enable(dev);

	acpi_pci_wakeup(pci_dev, false);
	acpi_device_power_add_dependent(adev, dev);

	if (pci_is_bridge(pci_dev))
		acpi_dev_power_up_children_with_adr(adev);
}

void pci_acpi_cleanup(struct device *dev, struct acpi_device *adev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);

	pci_acpi_remove_edr_notifier(pci_dev);
	pci_acpi_remove_pm_notifier(adev);
	if (adev->wakeup.flags.valid) {
		acpi_device_power_remove_dependent(adev, dev);
		if (pci_dev->bridge_d3)
			device_wakeup_disable(dev);

		device_set_wakeup_capable(dev, false);
	}
}

static struct fwnode_handle *(*pci_msi_get_fwnode_cb)(struct device *dev);

/**
 * pci_msi_register_fwnode_provider - Register callback to retrieve fwnode
 * @fn: Callback matching a device to a fwnode that identifies a PCI
 *      MSI domain.
 *
 * This should be called by the irqchip driver, which is the parent of the
 * MSI domain, to provide a callback interface for querying the fwnode.
 */
void
pci_msi_register_fwnode_provider(struct fwnode_handle *(*fn)(struct device *))
{
	pci_msi_get_fwnode_cb = fn;
}
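
/*
 * Illustrative sketch (not part of this file): the parent irqchip driver
 * could register a provider during its initialization, e.g.:
 *
 *	// foo_pci_msi_get_fwnode() and foo_msi_fwnode are hypothetical
 *	// names used only for illustration
 *	static struct fwnode_handle *foo_pci_msi_get_fwnode(struct device *dev)
 *	{
 *		return foo_msi_fwnode;	// fwnode of the PCI MSI irqdomain
 *	}
 *
 *	pci_msi_register_fwnode_provider(&foo_pci_msi_get_fwnode);
 *
 * pci_host_bridge_acpi_msi_domain() below then uses that callback to find
 * the DOMAIN_BUS_PCI_MSI irqdomain for a host bridge bus.
 */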

/**
 * pci_host_bridge_acpi_msi_domain - Retrieve MSI domain of a PCI host bridge
 * @bus: The PCI host bridge bus.
 *
 * This function uses the callback function registered by
 * pci_msi_register_fwnode_provider() to retrieve the irq_domain with
 * type DOMAIN_BUS_PCI_MSI of the specified host bridge bus.
 * This returns NULL on error or when the domain is not found.
 */
struct irq_domain *pci_host_bridge_acpi_msi_domain(struct pci_bus *bus)
{
	struct fwnode_handle *fwnode;

	if (!pci_msi_get_fwnode_cb)
		return NULL;

	fwnode = pci_msi_get_fwnode_cb(&bus->dev);
	if (!fwnode)
		return NULL;

	return irq_find_matching_fwnode(fwnode, DOMAIN_BUS_PCI_MSI);
}

static int __init acpi_pci_init(void)
{
	if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_MSI) {
		pr_info("ACPI FADT declares the system doesn't support MSI, so disable it\n");
		pci_no_msi();
	}

	if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_ASPM) {
		pr_info("ACPI FADT declares the system doesn't support PCIe ASPM, so disable it\n");
		pcie_no_aspm();
	}

	if (acpi_pci_disabled)
		return 0;

	acpi_pci_slot_init();
	acpiphp_init();

	return 0;
}
arch_initcall(acpi_pci_init);

#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)

/*
 * Try to assign the IRQ number when probing a new device
 */
int pcibios_alloc_irq(struct pci_dev *dev)
{
	if (!acpi_disabled)
		acpi_pci_irq_enable(dev);

	return 0;
}

struct acpi_pci_generic_root_info {
	struct acpi_pci_root_info	common;
	struct pci_config_window	*cfg;	/* config space mapping */
};

int acpi_pci_bus_find_domain_nr(struct pci_bus *bus)
{
	struct pci_config_window *cfg = bus->sysdata;
	struct acpi_device *adev = to_acpi_device(cfg->parent);
	struct acpi_pci_root *root = acpi_driver_data(adev);

	return root->segment;
}

int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
{
	struct pci_config_window *cfg;
	struct acpi_device *adev;
	struct device *bus_dev;

	if (acpi_disabled)
		return 0;

	cfg = bridge->bus->sysdata;

	/*
	 * On Hyper-V there is no corresponding ACPI device for a root bridge,
	 * therefore ->parent is set as NULL by the driver. And set 'adev' as
	 * NULL in this case because there is no proper ACPI device.
	 */
	if (!cfg->parent)
		adev = NULL;
	else
		adev = to_acpi_device(cfg->parent);

	bus_dev = &bridge->bus->dev;

	ACPI_COMPANION_SET(&bridge->dev, adev);
	set_dev_node(bus_dev, acpi_get_node(acpi_device_handle(adev)));

	return 0;
}

static int pci_acpi_root_prepare_resources(struct acpi_pci_root_info *ci)
{
	struct resource_entry *entry, *tmp;
	int status;

	status = acpi_pci_probe_root_resources(ci);
	resource_list_for_each_entry_safe(entry, tmp, &ci->resources) {
		if (!(entry->res->flags & IORESOURCE_WINDOW))
			resource_list_destroy_entry(entry);
	}
	return status;
}

/*
 * Lookup the bus range for the domain in MCFG, and set up config space
 * mapping.
 */
static struct pci_config_window *
pci_acpi_setup_ecam_mapping(struct acpi_pci_root *root)
{
	struct device *dev = &root->device->dev;
	struct resource *bus_res = &root->secondary;
	u16 seg = root->segment;
	const struct pci_ecam_ops *ecam_ops;
	struct resource cfgres;
	struct acpi_device *adev;
	struct pci_config_window *cfg;
	int ret;

	ret = pci_mcfg_lookup(root, &cfgres, &ecam_ops);
	if (ret) {
		dev_err(dev, "%04x:%pR ECAM region not found\n", seg, bus_res);
		return NULL;
	}

	adev = acpi_resource_consumer(&cfgres);
	if (adev)
		dev_info(dev, "ECAM area %pR reserved by %s\n", &cfgres,
			 dev_name(&adev->dev));
	else
		dev_warn(dev, FW_BUG "ECAM area %pR not reserved in ACPI namespace\n",
			 &cfgres);

	cfg = pci_ecam_create(dev, &cfgres, bus_res, ecam_ops);
	if (IS_ERR(cfg)) {
		dev_err(dev, "%04x:%pR error %ld mapping ECAM\n", seg, bus_res,
			PTR_ERR(cfg));
		return NULL;
	}

	return cfg;
}

/* release_info: free resources allocated by init_info */
static void pci_acpi_generic_release_info(struct acpi_pci_root_info *ci)
{
	struct acpi_pci_generic_root_info *ri;

	ri = container_of(ci, struct acpi_pci_generic_root_info, common);
	pci_ecam_free(ri->cfg);
	kfree(ci->ops);
	kfree(ri);
}

/* Interface called from ACPI code to setup PCI host controller */
struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
{
	struct acpi_pci_generic_root_info *ri;
	struct pci_bus *bus, *child;
	struct acpi_pci_root_ops *root_ops;
	struct pci_host_bridge *host;

	ri = kzalloc(sizeof(*ri), GFP_KERNEL);
	if (!ri)
		return NULL;

	root_ops = kzalloc(sizeof(*root_ops), GFP_KERNEL);
	if (!root_ops) {
		kfree(ri);
		return NULL;
	}

	ri->cfg = pci_acpi_setup_ecam_mapping(root);
	if (!ri->cfg) {
		kfree(ri);
		kfree(root_ops);
		return NULL;
	}

	root_ops->release_info = pci_acpi_generic_release_info;
	root_ops->prepare_resources = pci_acpi_root_prepare_resources;
	root_ops->pci_ops = (struct pci_ops *)&ri->cfg->ops->pci_ops;
	bus = acpi_pci_root_create(root, root_ops, &ri->common, ri->cfg);
	if (!bus)
		return NULL;

	/* If we must preserve the resource configuration, claim now */
	host = pci_find_host_bridge(bus);
	if (host->preserve_config)
		pci_bus_claim_resources(bus);

	/*
	 * Assign whatever was left unassigned. If we didn't claim above,
	 * this will reassign everything.
	 */
	pci_assign_unassigned_root_bus_resources(bus);

	list_for_each_entry(child, &bus->children, node)
		pcie_bus_configure_settings(child);

	return bus;
}

void pcibios_add_bus(struct pci_bus *bus)
{
	acpi_pci_add_bus(bus);
}

void pcibios_remove_bus(struct pci_bus *bus)
{
	acpi_pci_remove_bus(bus);
}

#endif