// SPDX-License-Identifier: GPL-2.0
/*
 * PCI support in ACPI
 *
 * Copyright (C) 2005 David Shaohua Li <shaohua.li@intel.com>
 * Copyright (C) 2004 Tom Long Nguyen <tom.l.nguyen@intel.com>
 * Copyright (C) 2004 Intel Corp.
 */

#include <linux/delay.h>
#include <linux/init.h>
#include <linux/irqdomain.h>
#include <linux/pci.h>
#include <linux/msi.h>
#include <linux/pci_hotplug.h>
#include <linux/module.h>
#include <linux/pci-acpi.h>
#include <linux/pci-ecam.h>
#include <linux/pm_runtime.h>
#include <linux/pm_qos.h>
#include <linux/rwsem.h>
#include "pci.h"

/*
 * The GUID is defined in the PCI Firmware Specification, available to
 * PCI-SIG members at:
 * https://members.pcisig.com/wg/PCI-SIG/document/15350
 */
const guid_t pci_acpi_dsm_guid =
	GUID_INIT(0xe5c937d0, 0x3553, 0x4d7a,
		  0x91, 0x17, 0xea, 0x4d, 0x19, 0xc3, 0x43, 0x4d);

#if defined(CONFIG_PCI_QUIRKS) && defined(CONFIG_ARM64)
static int acpi_get_rc_addr(struct acpi_device *adev, struct resource *res)
{
	struct device *dev = &adev->dev;
	struct resource_entry *entry;
	struct list_head list;
	unsigned long flags;
	int ret;

	INIT_LIST_HEAD(&list);
	flags = IORESOURCE_MEM;
	ret = acpi_dev_get_resources(adev, &list,
				     acpi_dev_filter_resource_type_cb,
				     (void *) flags);
	if (ret < 0) {
		dev_err(dev, "failed to parse _CRS method, error code %d\n",
			ret);
		return ret;
	}

	if (ret == 0) {
		dev_err(dev, "no memory resources present in _CRS\n");
		return -EINVAL;
	}

	entry = list_first_entry(&list, struct resource_entry, node);
	*res = *entry->res;
	acpi_dev_free_resource_list(&list);
	return 0;
}

static acpi_status acpi_match_rc(acpi_handle handle, u32 lvl, void *context,
				 void **retval)
{
	u16 *segment = context;
	unsigned long long uid;
	acpi_status status;

	status = acpi_evaluate_integer(handle, METHOD_NAME__UID, NULL, &uid);
	if (ACPI_FAILURE(status) || uid != *segment)
		return AE_CTRL_DEPTH;

	*(acpi_handle *)retval = handle;
	return AE_CTRL_TERMINATE;
}

int acpi_get_rc_resources(struct device *dev, const char *hid, u16 segment,
			  struct resource *res)
{
	struct acpi_device *adev;
	acpi_status status;
	acpi_handle handle;
	int ret;

	status = acpi_get_devices(hid, acpi_match_rc, &segment, &handle);
	if (ACPI_FAILURE(status)) {
		dev_err(dev, "can't find _HID %s device to locate resources\n",
			hid);
		return -ENODEV;
	}

	adev = acpi_fetch_acpi_dev(handle);
	if (!adev)
		return -ENODEV;

	ret = acpi_get_rc_addr(adev, res);
	if (ret) {
		dev_err(dev, "can't get resource from %s\n",
			dev_name(&adev->dev));
		return ret;
	}

	return 0;
}
#endif
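
/*
 * Illustrative use of acpi_get_rc_resources() from a host controller
 * quirk (a sketch, not taken verbatim from a specific driver; the
 * "HISI0081" _HID and the mapping call are assumptions):
 *
 *	struct resource res;
 *
 *	if (!acpi_get_rc_resources(dev, "HISI0081", 0, &res))
 *		reg_base = devm_pci_remap_cfgspace(dev, res.start,
 *						   resource_size(&res));
 */
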
phys_addr_t acpi_pci_root_get_mcfg_addr(acpi_handle handle)
{
	acpi_status status = AE_NOT_EXIST;
	unsigned long long mcfg_addr;

	if (handle)
		status = acpi_evaluate_integer(handle, METHOD_NAME__CBA,
					       NULL, &mcfg_addr);
	if (ACPI_FAILURE(status))
		return 0;

	return (phys_addr_t)mcfg_addr;
}

bool pci_acpi_preserve_config(struct pci_host_bridge *host_bridge)
{
	if (ACPI_HANDLE(&host_bridge->dev)) {
		union acpi_object *obj;
		bool preserve;

		/*
		 * Evaluate the "PCI Boot Configuration" _DSM Function.  If it
		 * exists and returns 0, we must preserve any PCI resource
		 * assignments made by firmware for this host bridge.
		 */
		obj = acpi_evaluate_dsm_typed(ACPI_HANDLE(&host_bridge->dev),
					      &pci_acpi_dsm_guid,
					      1, DSM_PCI_PRESERVE_BOOT_CONFIG,
					      NULL, ACPI_TYPE_INTEGER);
		preserve = obj && obj->integer.value == 0;
		/* Free the object on both paths to avoid leaking it */
		ACPI_FREE(obj);
		if (preserve)
			return true;
	}

	return false;
}

/* _HPX PCI Setting Record (Type 0); same as _HPP */
struct hpx_type0 {
	u32 revision;		/* Not present in _HPP */
	u8  cache_line_size;	/* Not applicable to PCIe */
	u8  latency_timer;	/* Not applicable to PCIe */
	u8  enable_serr;
	u8  enable_perr;
};

static struct hpx_type0 pci_default_type0 = {
	.revision = 1,
	.cache_line_size = 8,
	.latency_timer = 0x40,
	.enable_serr = 0,
	.enable_perr = 0,
};

static void program_hpx_type0(struct pci_dev *dev, struct hpx_type0 *hpx)
{
	u16 pci_cmd, pci_bctl;

	if (!hpx)
		hpx = &pci_default_type0;

	if (hpx->revision > 1) {
		pci_warn(dev, "PCI settings rev %d not supported; using defaults\n",
			 hpx->revision);
		hpx = &pci_default_type0;
	}

	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, hpx->cache_line_size);
	pci_write_config_byte(dev, PCI_LATENCY_TIMER, hpx->latency_timer);
	pci_read_config_word(dev, PCI_COMMAND, &pci_cmd);
	if (hpx->enable_serr)
		pci_cmd |= PCI_COMMAND_SERR;
	if (hpx->enable_perr)
		pci_cmd |= PCI_COMMAND_PARITY;
	pci_write_config_word(dev, PCI_COMMAND, pci_cmd);

	/* Program bridge control value */
	if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
		pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER,
				      hpx->latency_timer);
		pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &pci_bctl);
		if (hpx->enable_perr)
			pci_bctl |= PCI_BRIDGE_CTL_PARITY;
		pci_write_config_word(dev, PCI_BRIDGE_CONTROL, pci_bctl);
	}
}

static acpi_status decode_type0_hpx_record(union acpi_object *record,
					   struct hpx_type0 *hpx0)
{
	int i;
	union acpi_object *fields = record->package.elements;
	u32 revision = fields[1].integer.value;

	switch (revision) {
	case 1:
		if (record->package.count != 6)
			return AE_ERROR;
		for (i = 2; i < 6; i++)
			if (fields[i].type != ACPI_TYPE_INTEGER)
				return AE_ERROR;
		hpx0->revision = revision;
		hpx0->cache_line_size = fields[2].integer.value;
		hpx0->latency_timer = fields[3].integer.value;
		hpx0->enable_serr = fields[4].integer.value;
		hpx0->enable_perr = fields[5].integer.value;
		break;
	default:
		pr_warn("%s: Type 0 Revision %d record not supported\n",
			__func__, revision);
		return AE_ERROR;
	}
	return AE_OK;
}
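
/*
 * Illustrative Type 0 record as it might appear in an _HPX return
 * package (invented values, shown in ASL):
 *
 *	Package () {
 *		0x00,	// Type 0
 *		0x01,	// Revision 1
 *		0x08,	// Cache line size, in DWORDs
 *		0x40,	// Latency timer
 *		0x01,	// Enable SERR
 *		0x00	// Enable PERR
 *	}
 */
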
/* _HPX PCI-X Setting Record (Type 1) */
struct hpx_type1 {
	u32 revision;
	u8  max_mem_read;
	u8  avg_max_split;
	u16 tot_max_split;
};

static void program_hpx_type1(struct pci_dev *dev, struct hpx_type1 *hpx)
{
	int pos;

	if (!hpx)
		return;

	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!pos)
		return;

	pci_warn(dev, "PCI-X settings not supported\n");
}

static acpi_status decode_type1_hpx_record(union acpi_object *record,
					   struct hpx_type1 *hpx1)
{
	int i;
	union acpi_object *fields = record->package.elements;
	u32 revision = fields[1].integer.value;

	switch (revision) {
	case 1:
		if (record->package.count != 5)
			return AE_ERROR;
		for (i = 2; i < 5; i++)
			if (fields[i].type != ACPI_TYPE_INTEGER)
				return AE_ERROR;
		hpx1->revision = revision;
		hpx1->max_mem_read = fields[2].integer.value;
		hpx1->avg_max_split = fields[3].integer.value;
		hpx1->tot_max_split = fields[4].integer.value;
		break;
	default:
		pr_warn("%s: Type 1 Revision %d record not supported\n",
			__func__, revision);
		return AE_ERROR;
	}
	return AE_OK;
}

static bool pcie_root_rcb_set(struct pci_dev *dev)
{
	struct pci_dev *rp = pcie_find_root_port(dev);
	u16 lnkctl;

	if (!rp)
		return false;

	pcie_capability_read_word(rp, PCI_EXP_LNKCTL, &lnkctl);
	if (lnkctl & PCI_EXP_LNKCTL_RCB)
		return true;

	return false;
}

/* _HPX PCI Express Setting Record (Type 2) */
struct hpx_type2 {
	u32 revision;
	u32 unc_err_mask_and;
	u32 unc_err_mask_or;
	u32 unc_err_sever_and;
	u32 unc_err_sever_or;
	u32 cor_err_mask_and;
	u32 cor_err_mask_or;
	u32 adv_err_cap_and;
	u32 adv_err_cap_or;
	u16 pci_exp_devctl_and;
	u16 pci_exp_devctl_or;
	u16 pci_exp_lnkctl_and;
	u16 pci_exp_lnkctl_or;
	u32 sec_unc_err_sever_and;
	u32 sec_unc_err_sever_or;
	u32 sec_unc_err_mask_and;
	u32 sec_unc_err_mask_or;
};
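
/*
 * Each Type 2 field pair is applied read-modify-write as
 * reg = (reg & and_mask) | or_mask.  Illustrative example (invented
 * values): unc_err_mask_and = 0xfffbffff with unc_err_mask_or =
 * 0x00000020 clears bit 18, sets bit 5 and leaves every other bit as
 * previously configured.
 */
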
static void program_hpx_type2(struct pci_dev *dev, struct hpx_type2 *hpx)
{
	int pos;
	u32 reg32;

	if (!hpx)
		return;

	if (!pci_is_pcie(dev))
		return;

	if (hpx->revision > 1) {
		pci_warn(dev, "PCIe settings rev %d not supported\n",
			 hpx->revision);
		return;
	}

	/*
	 * Don't allow _HPX to change MPS or MRRS settings.  We manage
	 * those to make sure they're consistent with the rest of the
	 * platform.
	 */
	hpx->pci_exp_devctl_and |= PCI_EXP_DEVCTL_PAYLOAD |
				   PCI_EXP_DEVCTL_READRQ;
	hpx->pci_exp_devctl_or &= ~(PCI_EXP_DEVCTL_PAYLOAD |
				    PCI_EXP_DEVCTL_READRQ);

	/* Initialize Device Control Register */
	pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
			~hpx->pci_exp_devctl_and, hpx->pci_exp_devctl_or);

	/* Initialize Link Control Register */
	if (pcie_cap_has_lnkctl(dev)) {

		/*
		 * If the Root Port supports Read Completion Boundary of
		 * 128, set RCB to 128.  Otherwise, clear it.
		 */
		hpx->pci_exp_lnkctl_and |= PCI_EXP_LNKCTL_RCB;
		hpx->pci_exp_lnkctl_or &= ~PCI_EXP_LNKCTL_RCB;
		if (pcie_root_rcb_set(dev))
			hpx->pci_exp_lnkctl_or |= PCI_EXP_LNKCTL_RCB;

		pcie_capability_clear_and_set_word(dev, PCI_EXP_LNKCTL,
			~hpx->pci_exp_lnkctl_and, hpx->pci_exp_lnkctl_or);
	}

	/* Find Advanced Error Reporting Enhanced Capability */
	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
	if (!pos)
		return;

	/* Initialize Uncorrectable Error Mask Register */
	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, &reg32);
	reg32 = (reg32 & hpx->unc_err_mask_and) | hpx->unc_err_mask_or;
	pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, reg32);

	/* Initialize Uncorrectable Error Severity Register */
	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &reg32);
	reg32 = (reg32 & hpx->unc_err_sever_and) | hpx->unc_err_sever_or;
	pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, reg32);

	/* Initialize Correctable Error Mask Register */
	pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, &reg32);
	reg32 = (reg32 & hpx->cor_err_mask_and) | hpx->cor_err_mask_or;
	pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, reg32);

	/* Initialize Advanced Error Capabilities and Control Register */
	pci_read_config_dword(dev, pos + PCI_ERR_CAP, &reg32);
	reg32 = (reg32 & hpx->adv_err_cap_and) | hpx->adv_err_cap_or;

	/* Don't enable ECRC generation or checking if unsupported */
	if (!(reg32 & PCI_ERR_CAP_ECRC_GENC))
		reg32 &= ~PCI_ERR_CAP_ECRC_GENE;
	if (!(reg32 & PCI_ERR_CAP_ECRC_CHKC))
		reg32 &= ~PCI_ERR_CAP_ECRC_CHKE;
	pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32);

	/*
	 * FIXME: The following two registers are not supported yet.
	 *
	 *   o Secondary Uncorrectable Error Severity Register
	 *   o Secondary Uncorrectable Error Mask Register
	 */
}
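
/*
 * Illustrative effect of the MPS/MRRS clamp above (invented values):
 * if a record's pci_exp_devctl_or sets bits inside PCI_EXP_DEVCTL_READRQ
 * (e.g. 0x2000, requesting MRRS 512), the clamp removes those bits from
 * the OR mask and forces them into the AND mask, so the Device Control
 * write leaves the kernel-chosen MPS and MRRS untouched.
 */
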
static acpi_status decode_type2_hpx_record(union acpi_object *record,
					   struct hpx_type2 *hpx2)
{
	int i;
	union acpi_object *fields = record->package.elements;
	u32 revision = fields[1].integer.value;

	switch (revision) {
	case 1:
		if (record->package.count != 18)
			return AE_ERROR;
		for (i = 2; i < 18; i++)
			if (fields[i].type != ACPI_TYPE_INTEGER)
				return AE_ERROR;
		hpx2->revision = revision;
		hpx2->unc_err_mask_and = fields[2].integer.value;
		hpx2->unc_err_mask_or = fields[3].integer.value;
		hpx2->unc_err_sever_and = fields[4].integer.value;
		hpx2->unc_err_sever_or = fields[5].integer.value;
		hpx2->cor_err_mask_and = fields[6].integer.value;
		hpx2->cor_err_mask_or = fields[7].integer.value;
		hpx2->adv_err_cap_and = fields[8].integer.value;
		hpx2->adv_err_cap_or = fields[9].integer.value;
		hpx2->pci_exp_devctl_and = fields[10].integer.value;
		hpx2->pci_exp_devctl_or = fields[11].integer.value;
		hpx2->pci_exp_lnkctl_and = fields[12].integer.value;
		hpx2->pci_exp_lnkctl_or = fields[13].integer.value;
		hpx2->sec_unc_err_sever_and = fields[14].integer.value;
		hpx2->sec_unc_err_sever_or = fields[15].integer.value;
		hpx2->sec_unc_err_mask_and = fields[16].integer.value;
		hpx2->sec_unc_err_mask_or = fields[17].integer.value;
		break;
	default:
		pr_warn("%s: Type 2 Revision %d record not supported\n",
			__func__, revision);
		return AE_ERROR;
	}
	return AE_OK;
}

/* _HPX PCI Express Setting Record (Type 3) */
struct hpx_type3 {
	u16 device_type;
	u16 function_type;
	u16 config_space_location;
	u16 pci_exp_cap_id;
	u16 pci_exp_cap_ver;
	u16 pci_exp_vendor_id;
	u16 dvsec_id;
	u16 dvsec_rev;
	u16 match_offset;
	u32 match_mask_and;
	u32 match_value;
	u16 reg_offset;
	u32 reg_mask_and;
	u32 reg_mask_or;
};

enum hpx_type3_dev_type {
	HPX_TYPE_ENDPOINT	= BIT(0),
	HPX_TYPE_LEG_END	= BIT(1),
	HPX_TYPE_RC_END		= BIT(2),
	HPX_TYPE_RC_EC		= BIT(3),
	HPX_TYPE_ROOT_PORT	= BIT(4),
	HPX_TYPE_UPSTREAM	= BIT(5),
	HPX_TYPE_DOWNSTREAM	= BIT(6),
	HPX_TYPE_PCI_BRIDGE	= BIT(7),
	HPX_TYPE_PCIE_BRIDGE	= BIT(8),
};

static u16 hpx3_device_type(struct pci_dev *dev)
{
	u16 pcie_type = pci_pcie_type(dev);
	static const int pcie_to_hpx3_type[] = {
		[PCI_EXP_TYPE_ENDPOINT]    = HPX_TYPE_ENDPOINT,
		[PCI_EXP_TYPE_LEG_END]     = HPX_TYPE_LEG_END,
		[PCI_EXP_TYPE_RC_END]      = HPX_TYPE_RC_END,
		[PCI_EXP_TYPE_RC_EC]       = HPX_TYPE_RC_EC,
		[PCI_EXP_TYPE_ROOT_PORT]   = HPX_TYPE_ROOT_PORT,
		[PCI_EXP_TYPE_UPSTREAM]    = HPX_TYPE_UPSTREAM,
		[PCI_EXP_TYPE_DOWNSTREAM]  = HPX_TYPE_DOWNSTREAM,
		[PCI_EXP_TYPE_PCI_BRIDGE]  = HPX_TYPE_PCI_BRIDGE,
		[PCI_EXP_TYPE_PCIE_BRIDGE] = HPX_TYPE_PCIE_BRIDGE,
	};

	if (pcie_type >= ARRAY_SIZE(pcie_to_hpx3_type))
		return 0;

	return pcie_to_hpx3_type[pcie_type];
}

enum hpx_type3_fn_type {
	HPX_FN_NORMAL		= BIT(0),
	HPX_FN_SRIOV_PHYS	= BIT(1),
	HPX_FN_SRIOV_VIRT	= BIT(2),
};

static u8 hpx3_function_type(struct pci_dev *dev)
{
	if (dev->is_virtfn)
		return HPX_FN_SRIOV_VIRT;
	else if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV) > 0)
		return HPX_FN_SRIOV_PHYS;
	else
		return HPX_FN_NORMAL;
}

static bool hpx3_cap_ver_matches(u8 pcie_cap_id, u8 hpx3_cap_id)
{
	u8 cap_ver = hpx3_cap_id & 0xf;

	if ((hpx3_cap_id & BIT(4)) && cap_ver >= pcie_cap_id)
		return true;
	else if (cap_ver == pcie_cap_id)
		return true;

	return false;
}
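
/*
 * Illustrative version match (invented values): an _HPX descriptor with
 * pci_exp_cap_ver 0x12 has BIT(4) set, so it matches any capability
 * version >= 2; 0x02 (BIT(4) clear) matches version 2 exactly.
 */
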
enum hpx_type3_cfg_loc {
	HPX_CFG_PCICFG		= 0,
	HPX_CFG_PCIE_CAP	= 1,
	HPX_CFG_PCIE_CAP_EXT	= 2,
	HPX_CFG_VEND_CAP	= 3,
	HPX_CFG_DVSEC		= 4,
	HPX_CFG_MAX,
};

static void program_hpx_type3_register(struct pci_dev *dev,
				       const struct hpx_type3 *reg)
{
	u32 match_reg, write_reg, header, orig_value;
	u16 pos;

	if (!(hpx3_device_type(dev) & reg->device_type))
		return;

	if (!(hpx3_function_type(dev) & reg->function_type))
		return;

	switch (reg->config_space_location) {
	case HPX_CFG_PCICFG:
		pos = 0;
		break;
	case HPX_CFG_PCIE_CAP:
		pos = pci_find_capability(dev, reg->pci_exp_cap_id);
		if (pos == 0)
			return;

		break;
	case HPX_CFG_PCIE_CAP_EXT:
		pos = pci_find_ext_capability(dev, reg->pci_exp_cap_id);
		if (pos == 0)
			return;

		pci_read_config_dword(dev, pos, &header);
		if (!hpx3_cap_ver_matches(PCI_EXT_CAP_VER(header),
					  reg->pci_exp_cap_ver))
			return;

		break;
	case HPX_CFG_VEND_CAP:
	case HPX_CFG_DVSEC:
	default:
		pci_warn(dev, "Encountered _HPX type 3 with unsupported config space location\n");
		return;
	}

	pci_read_config_dword(dev, pos + reg->match_offset, &match_reg);

	if ((match_reg & reg->match_mask_and) != reg->match_value)
		return;

	pci_read_config_dword(dev, pos + reg->reg_offset, &write_reg);
	orig_value = write_reg;
	write_reg &= reg->reg_mask_and;
	write_reg |= reg->reg_mask_or;

	if (orig_value == write_reg)
		return;

	pci_write_config_dword(dev, pos + reg->reg_offset, write_reg);

	pci_dbg(dev, "Applied _HPX3 at [0x%x]: 0x%08x -> 0x%08x\n",
		pos, orig_value, write_reg);
}

static void program_hpx_type3(struct pci_dev *dev, struct hpx_type3 *hpx)
{
	if (!hpx)
		return;

	if (!pci_is_pcie(dev))
		return;

	program_hpx_type3_register(dev, hpx);
}
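
/*
 * Illustrative Type 3 descriptor effect (invented values): with
 * match_offset 0x04, match_mask_and 0x00000007 and match_value 0x1, the
 * descriptor applies only when (reg@0x04 & 0x7) == 0x1; the register at
 * reg_offset is then rewritten as (value & reg_mask_and) | reg_mask_or.
 */
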
static void parse_hpx3_register(struct hpx_type3 *hpx3_reg,
				union acpi_object *reg_fields)
{
	hpx3_reg->device_type = reg_fields[0].integer.value;
	hpx3_reg->function_type = reg_fields[1].integer.value;
	hpx3_reg->config_space_location = reg_fields[2].integer.value;
	hpx3_reg->pci_exp_cap_id = reg_fields[3].integer.value;
	hpx3_reg->pci_exp_cap_ver = reg_fields[4].integer.value;
	hpx3_reg->pci_exp_vendor_id = reg_fields[5].integer.value;
	hpx3_reg->dvsec_id = reg_fields[6].integer.value;
	hpx3_reg->dvsec_rev = reg_fields[7].integer.value;
	hpx3_reg->match_offset = reg_fields[8].integer.value;
	hpx3_reg->match_mask_and = reg_fields[9].integer.value;
	hpx3_reg->match_value = reg_fields[10].integer.value;
	hpx3_reg->reg_offset = reg_fields[11].integer.value;
	hpx3_reg->reg_mask_and = reg_fields[12].integer.value;
	hpx3_reg->reg_mask_or = reg_fields[13].integer.value;
}

static acpi_status program_type3_hpx_record(struct pci_dev *dev,
					    union acpi_object *record)
{
	union acpi_object *fields = record->package.elements;
	u32 desc_count, expected_length, revision;
	union acpi_object *reg_fields;
	struct hpx_type3 hpx3;
	int i;

	revision = fields[1].integer.value;
	switch (revision) {
	case 1:
		desc_count = fields[2].integer.value;
		expected_length = 3 + desc_count * 14;

		if (record->package.count != expected_length)
			return AE_ERROR;

		for (i = 2; i < expected_length; i++)
			if (fields[i].type != ACPI_TYPE_INTEGER)
				return AE_ERROR;

		for (i = 0; i < desc_count; i++) {
			reg_fields = fields + 3 + i * 14;
			parse_hpx3_register(&hpx3, reg_fields);
			program_hpx_type3(dev, &hpx3);
		}

		break;
	default:
		pr_warn("%s: Type 3 Revision %d record not supported\n",
			__func__, revision);
		return AE_ERROR;
	}
	return AE_OK;
}

static acpi_status acpi_run_hpx(struct pci_dev *dev, acpi_handle handle)
{
	acpi_status status;
	struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
	union acpi_object *package, *record, *fields;
	struct hpx_type0 hpx0;
	struct hpx_type1 hpx1;
	struct hpx_type2 hpx2;
	u32 type;
	int i;

	status = acpi_evaluate_object(handle, "_HPX", NULL, &buffer);
	if (ACPI_FAILURE(status))
		return status;

	package = (union acpi_object *)buffer.pointer;
	if (package->type != ACPI_TYPE_PACKAGE) {
		status = AE_ERROR;
		goto exit;
	}

	for (i = 0; i < package->package.count; i++) {
		record = &package->package.elements[i];
		if (record->type != ACPI_TYPE_PACKAGE) {
			status = AE_ERROR;
			goto exit;
		}

		fields = record->package.elements;
		if (fields[0].type != ACPI_TYPE_INTEGER ||
		    fields[1].type != ACPI_TYPE_INTEGER) {
			status = AE_ERROR;
			goto exit;
		}

		type = fields[0].integer.value;
		switch (type) {
		case 0:
			memset(&hpx0, 0, sizeof(hpx0));
			status = decode_type0_hpx_record(record, &hpx0);
			if (ACPI_FAILURE(status))
				goto exit;
			program_hpx_type0(dev, &hpx0);
			break;
		case 1:
			memset(&hpx1, 0, sizeof(hpx1));
			status = decode_type1_hpx_record(record, &hpx1);
			if (ACPI_FAILURE(status))
				goto exit;
			program_hpx_type1(dev, &hpx1);
			break;
		case 2:
			memset(&hpx2, 0, sizeof(hpx2));
			status = decode_type2_hpx_record(record, &hpx2);
			if (ACPI_FAILURE(status))
				goto exit;
			program_hpx_type2(dev, &hpx2);
			break;
		case 3:
			status = program_type3_hpx_record(dev, record);
			if (ACPI_FAILURE(status))
				goto exit;
			break;
		default:
			pr_err("%s: Type %d record not supported\n",
			       __func__, type);
			status = AE_ERROR;
			goto exit;
		}
	}
exit:
	kfree(buffer.pointer);
	return status;
}
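
/*
 * Illustrative _HPX return value (invented): a package of records, each
 * itself a package beginning with { type, revision, ... }:
 *
 *	Name (_HPX, Package () {
 *		Package () { 0x00, 0x01, 0x08, 0x40, 0x01, 0x00 },
 *		Package () { 0x02, 0x01, ... 16 more integers ... }
 *	})
 */
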
static acpi_status acpi_run_hpp(struct pci_dev *dev, acpi_handle handle)
{
	acpi_status status;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *package, *fields;
	struct hpx_type0 hpx0;
	int i;

	memset(&hpx0, 0, sizeof(hpx0));

	status = acpi_evaluate_object(handle, "_HPP", NULL, &buffer);
	if (ACPI_FAILURE(status))
		return status;

	package = (union acpi_object *) buffer.pointer;
	if (package->type != ACPI_TYPE_PACKAGE ||
	    package->package.count != 4) {
		status = AE_ERROR;
		goto exit;
	}

	fields = package->package.elements;
	for (i = 0; i < 4; i++) {
		if (fields[i].type != ACPI_TYPE_INTEGER) {
			status = AE_ERROR;
			goto exit;
		}
	}

	hpx0.revision = 1;
	hpx0.cache_line_size = fields[0].integer.value;
	hpx0.latency_timer = fields[1].integer.value;
	hpx0.enable_serr = fields[2].integer.value;
	hpx0.enable_perr = fields[3].integer.value;

	program_hpx_type0(dev, &hpx0);

exit:
	kfree(buffer.pointer);
	return status;
}

/**
 * pci_acpi_program_hp_params - program hotplug parameters from ACPI
 * @dev: the pci_dev for which we want parameters
 *
 * Evaluate _HPX (preferred) or _HPP for @dev, walking up the ACPI
 * namespace from the device toward the host bridge until one of them
 * succeeds.
 */
int pci_acpi_program_hp_params(struct pci_dev *dev)
{
	acpi_status status;
	acpi_handle handle, phandle;
	struct pci_bus *pbus;

	if (acpi_pci_disabled)
		return -ENODEV;

	handle = NULL;
	for (pbus = dev->bus; pbus; pbus = pbus->parent) {
		handle = acpi_pci_get_bridge_handle(pbus);
		if (handle)
			break;
	}

	/*
	 * _HPP settings apply to all child buses, until another _HPP is
	 * encountered.  If we don't find an _HPP for the input pci dev,
	 * look for it in the parent device scope since that would apply to
	 * this pci dev.
	 */
	while (handle) {
		status = acpi_run_hpx(dev, handle);
		if (ACPI_SUCCESS(status))
			return 0;
		status = acpi_run_hpp(dev, handle);
		if (ACPI_SUCCESS(status))
			return 0;
		if (acpi_is_root_bridge(handle))
			break;
		status = acpi_get_parent(handle, &phandle);
		if (ACPI_FAILURE(status))
			break;
		handle = phandle;
	}
	return -ENODEV;
}
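
/*
 * Note: pci_acpi_program_hp_params() is expected to run once per device
 * at enumeration time (from the generic configure path in
 * drivers/pci/probe.c in current trees); it is a no-op returning
 * -ENODEV when ACPI is disabled.
 */
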
/**
 * pciehp_is_native - Check whether a hotplug port is handled by the OS
 * @bridge: Hotplug port to check
 *
 * Returns true if the given @bridge is handled by the native PCIe hotplug
 * driver.
 */
bool pciehp_is_native(struct pci_dev *bridge)
{
	const struct pci_host_bridge *host;

	if (!IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE))
		return false;

	if (pcie_ports_native)
		return true;

	host = pci_find_host_bridge(bridge->bus);
	return host->native_pcie_hotplug;
}

/**
 * shpchp_is_native - Check whether a hotplug port is handled by the OS
 * @bridge: Hotplug port to check
 *
 * Returns true if the given @bridge is handled by the native SHPC hotplug
 * driver.
 */
bool shpchp_is_native(struct pci_dev *bridge)
{
	return bridge->shpc_managed;
}

/**
 * pci_acpi_wake_bus - Root bus wakeup notification work function.
 * @context: Device wakeup context.
 */
static void pci_acpi_wake_bus(struct acpi_device_wakeup_context *context)
{
	struct acpi_device *adev;
	struct acpi_pci_root *root;

	adev = container_of(context, struct acpi_device, wakeup.context);
	root = acpi_driver_data(adev);
	pci_pme_wakeup_bus(root->bus);
}

/**
 * pci_acpi_wake_dev - PCI device wakeup notification work function.
 * @context: Device wakeup context.
 */
static void pci_acpi_wake_dev(struct acpi_device_wakeup_context *context)
{
	struct pci_dev *pci_dev;

	pci_dev = to_pci_dev(context->dev);

	if (pci_dev->pme_poll)
		pci_dev->pme_poll = false;

	if (pci_dev->current_state == PCI_D3cold) {
		pci_wakeup_event(pci_dev);
		pm_request_resume(&pci_dev->dev);
		return;
	}

	/* Clear PME Status if set. */
	if (pci_dev->pme_support)
		pci_check_pme_status(pci_dev);

	pci_wakeup_event(pci_dev);
	pm_request_resume(&pci_dev->dev);

	pci_pme_wakeup_bus(pci_dev->subordinate);
}

/**
 * pci_acpi_add_bus_pm_notifier - Register PM notifier for root PCI bus.
 * @dev: PCI root bridge ACPI device.
 */
acpi_status pci_acpi_add_bus_pm_notifier(struct acpi_device *dev)
{
	return acpi_add_pm_notifier(dev, NULL, pci_acpi_wake_bus);
}

/**
 * pci_acpi_add_pm_notifier - Register PM notifier for given PCI device.
 * @dev: ACPI device to add the notifier for.
 * @pci_dev: PCI device to check for the PME status if an event is signaled.
 */
acpi_status pci_acpi_add_pm_notifier(struct acpi_device *dev,
				     struct pci_dev *pci_dev)
{
	return acpi_add_pm_notifier(dev, &pci_dev->dev, pci_acpi_wake_dev);
}

/*
 * _SxD returns the D-state with the highest power
 * (lowest D-state number) supported in the S-state "x".
 *
 * If the device does not have a _PRW
 * (Power Resources for Wake) supporting system wakeup from "x",
 * then the OS is free to choose a lower power (higher number
 * D-state) than the return value from _SxD.
 *
 * But if _PRW is enabled at S-state "x", the OS
 * must not choose a power lower than _SxD --
 * unless the device has an _SxW method specifying
 * the lowest power (highest D-state number) the device
 * may enter while still able to wake the system.
 *
 * i.e. depending on global OS policy:
 *
 * if (_PRW at S-state x)
 *	choose from highest power _SxD to lowest power _SxW
 * else // no _PRW at S-state x
 *	choose highest power _SxD or any lower power
 */

pci_power_t acpi_pci_choose_state(struct pci_dev *pdev)
{
	int acpi_state, d_max;

	if (pdev->no_d3cold || !pdev->d3cold_allowed)
		d_max = ACPI_STATE_D3_HOT;
	else
		d_max = ACPI_STATE_D3_COLD;
	acpi_state = acpi_pm_device_sleep_state(&pdev->dev, NULL, d_max);
	if (acpi_state < 0)
		return PCI_POWER_ERROR;

	switch (acpi_state) {
	case ACPI_STATE_D0:
		return PCI_D0;
	case ACPI_STATE_D1:
		return PCI_D1;
	case ACPI_STATE_D2:
		return PCI_D2;
	case ACPI_STATE_D3_HOT:
		return PCI_D3hot;
	case ACPI_STATE_D3_COLD:
		return PCI_D3cold;
	}
	return PCI_POWER_ERROR;
}
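
/*
 * Illustrative outcome (invented): for a device whose companion reports
 * ACPI_STATE_D2 as the deepest usable state and which allows D3cold,
 * acpi_pm_device_sleep_state() returns ACPI_STATE_D2 and
 * acpi_pci_choose_state() maps it to PCI_D2.
 */
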
static struct acpi_device *acpi_pci_find_companion(struct device *dev);

void pci_set_acpi_fwnode(struct pci_dev *dev)
{
	if (!dev_fwnode(&dev->dev) && !pci_dev_is_added(dev))
		ACPI_COMPANION_SET(&dev->dev,
				   acpi_pci_find_companion(&dev->dev));
}

/**
 * pci_dev_acpi_reset - do a function level reset using _RST method
 * @dev: device to reset
 * @probe: if true, return 0 if device supports _RST
 */
int pci_dev_acpi_reset(struct pci_dev *dev, bool probe)
{
	acpi_handle handle = ACPI_HANDLE(&dev->dev);

	if (!handle || !acpi_has_method(handle, "_RST"))
		return -ENOTTY;

	if (probe)
		return 0;

	if (ACPI_FAILURE(acpi_evaluate_object(handle, "_RST", NULL, NULL))) {
		pci_warn(dev, "ACPI _RST failed\n");
		return -ENOTTY;
	}

	return 0;
}

bool acpi_pci_power_manageable(struct pci_dev *dev)
{
	struct acpi_device *adev = ACPI_COMPANION(&dev->dev);

	return adev && acpi_device_power_manageable(adev);
}

bool acpi_pci_bridge_d3(struct pci_dev *dev)
{
	struct pci_dev *rpdev;
	struct acpi_device *adev, *rpadev;
	const union acpi_object *obj;

	if (acpi_pci_disabled || !dev->is_pciehp)
		return false;

	adev = ACPI_COMPANION(&dev->dev);
	if (adev) {
		/*
		 * If the bridge has _S0W, whether or not it can go into D3
		 * depends on what is returned by that object.  In particular,
		 * if the power state returned by _S0W is D2 or shallower,
		 * entering D3 should not be allowed.
		 */
		if (acpi_dev_power_state_for_wake(adev) <= ACPI_STATE_D2)
			return false;

		/*
		 * Otherwise, assume that the bridge can enter D3 so long as
		 * it is power-manageable via ACPI.
		 */
		if (acpi_device_power_manageable(adev))
			return true;
	}

	rpdev = pcie_find_root_port(dev);
	if (!rpdev)
		return false;

	if (rpdev == dev)
		rpadev = adev;
	else
		rpadev = ACPI_COMPANION(&rpdev->dev);

	if (!rpadev)
		return false;

	/*
	 * If the Root Port cannot signal wakeup signals at all, i.e., it
	 * doesn't supply a wakeup GPE via _PRW, it cannot signal hotplug
	 * events from low-power states including D3hot and D3cold.
	 */
	if (!rpadev->wakeup.flags.valid)
		return false;

	/*
	 * In the bridge-below-a-Root-Port case, evaluate _S0W for the Root
	 * Port to verify whether or not it can signal wakeup from D3.
	 */
	if (rpadev != adev &&
	    acpi_dev_power_state_for_wake(rpadev) <= ACPI_STATE_D2)
		return false;

	/*
	 * The "HotPlugSupportInD3" property in a Root Port _DSD indicates
	 * the Port can signal hotplug events while in D3.  We assume any
	 * bridges *below* that Root Port can also signal hotplug events
	 * while in D3.
	 */
	if (!acpi_dev_get_property(rpadev, "HotPlugSupportInD3",
				   ACPI_TYPE_INTEGER, &obj) &&
	    obj->integer.value == 1)
		return true;

	return false;
}
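
/*
 * Illustrative _DSD advertising "HotPlugSupportInD3" on a Root Port
 * (a sketch; the UUID is the standard _DSD device-properties UUID):
 *
 *	Name (_DSD, Package () {
 *		ToUUID ("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"),
 *		Package () {
 *			Package () { "HotPlugSupportInD3", 1 }
 *		}
 *	})
 */
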
static void acpi_pci_config_space_access(struct pci_dev *dev, bool enable)
{
	int val = enable ? ACPI_REG_CONNECT : ACPI_REG_DISCONNECT;
	int ret = acpi_evaluate_reg(ACPI_HANDLE(&dev->dev),
				    ACPI_ADR_SPACE_PCI_CONFIG, val);
	if (ret)
		pci_dbg(dev, "ACPI _REG %s evaluation failed (%d)\n",
			enable ? "connect" : "disconnect", ret);
}
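
/*
 * Illustrative _REG method in ASL (a sketch; "CAVL" is an invented
 * named object): Arg0 is the address-space ID (2 == PCI_Config) and
 * Arg1 is 1 on connect, 0 on disconnect:
 *
 *	Method (_REG, 2) {
 *		If (Arg0 == 2) {
 *			Store (Arg1, CAVL)
 *		}
 *	}
 */
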
int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
	static const u8 state_conv[] = {
		[PCI_D0]     = ACPI_STATE_D0,
		[PCI_D1]     = ACPI_STATE_D1,
		[PCI_D2]     = ACPI_STATE_D2,
		[PCI_D3hot]  = ACPI_STATE_D3_HOT,
		[PCI_D3cold] = ACPI_STATE_D3_COLD,
	};
	int error;

	/* If the ACPI device has _EJ0, ignore the device */
	if (!adev || acpi_has_method(adev->handle, "_EJ0"))
		return -ENODEV;

	switch (state) {
	case PCI_D0:
	case PCI_D1:
	case PCI_D2:
	case PCI_D3hot:
	case PCI_D3cold:
		break;
	default:
		return -EINVAL;
	}

	if (state == PCI_D3cold) {
		if (dev_pm_qos_flags(&dev->dev, PM_QOS_FLAG_NO_POWER_OFF) ==
				PM_QOS_FLAGS_ALL)
			return -EBUSY;

		/* Notify AML that PCI config space will not be available */
		acpi_pci_config_space_access(dev, false);
	}

	error = acpi_device_set_power(adev, state_conv[state]);
	if (error)
		return error;

	pci_dbg(dev, "power state changed by ACPI to %s\n",
		acpi_power_state_string(adev->power.state));

	/*
	 * Notify AML of PCI config space availability.  Config space is
	 * accessible in all states except D3cold; the only transitions
	 * that change availability are transitions to D3cold and from
	 * D3cold to D0.
	 */
	if (state == PCI_D0)
		acpi_pci_config_space_access(dev, true);

	return 0;
}

pci_power_t acpi_pci_get_power_state(struct pci_dev *dev)
{
	struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
	static const pci_power_t state_conv[] = {
		[ACPI_STATE_D0]      = PCI_D0,
		[ACPI_STATE_D1]      = PCI_D1,
		[ACPI_STATE_D2]      = PCI_D2,
		[ACPI_STATE_D3_HOT]  = PCI_D3hot,
		[ACPI_STATE_D3_COLD] = PCI_D3cold,
	};
	int state;

	if (!adev || !acpi_device_power_manageable(adev))
		return PCI_UNKNOWN;

	state = adev->power.state;
	if (state == ACPI_STATE_UNKNOWN)
		return PCI_UNKNOWN;

	return state_conv[state];
}

void acpi_pci_refresh_power_state(struct pci_dev *dev)
{
	struct acpi_device *adev = ACPI_COMPANION(&dev->dev);

	if (adev && acpi_device_power_manageable(adev))
		acpi_device_update_power(adev, NULL);
}

static int acpi_pci_propagate_wakeup(struct pci_bus *bus, bool enable)
{
	while (bus->parent) {
		if (acpi_pm_device_can_wakeup(&bus->self->dev))
			return acpi_pm_set_device_wakeup(&bus->self->dev, enable);

		bus = bus->parent;
	}

	/* We have reached the root bus. */
	if (bus->bridge) {
		if (acpi_pm_device_can_wakeup(bus->bridge))
			return acpi_pm_set_device_wakeup(bus->bridge, enable);
	}
	return 0;
}

int acpi_pci_wakeup(struct pci_dev *dev, bool enable)
{
	if (acpi_pci_disabled)
		return 0;

	if (acpi_pm_device_can_wakeup(&dev->dev))
		return acpi_pm_set_device_wakeup(&dev->dev, enable);

	return acpi_pci_propagate_wakeup(dev->bus, enable);
}

bool acpi_pci_need_resume(struct pci_dev *dev)
{
	struct acpi_device *adev;

	if (acpi_pci_disabled)
		return false;

	/*
	 * In some cases (e.g. Samsung 305V4A) leaving a bridge in suspend
	 * over system-wide suspend/resume confuses the platform firmware,
	 * so avoid doing that.  According to Section 16.1.6 of ACPI 6.2,
	 * endpoint devices are expected to be in D3 before invoking the S3
	 * entry path from the firmware, so they should not be affected by
	 * this issue.
	 */
	if (pci_is_bridge(dev) && acpi_target_system_state() != ACPI_STATE_S0)
		return true;

	adev = ACPI_COMPANION(&dev->dev);
	if (!adev || !acpi_device_power_manageable(adev))
		return false;

	if (adev->wakeup.flags.valid &&
	    device_may_wakeup(&dev->dev) != !!adev->wakeup.prepare_count)
		return true;

	if (acpi_target_system_state() == ACPI_STATE_S0)
		return false;

	return !!adev->power.flags.dsw_present;
}

void acpi_pci_add_bus(struct pci_bus *bus)
{
	union acpi_object *obj;
	struct pci_host_bridge *bridge;

	if (acpi_pci_disabled || !bus->bridge || !ACPI_HANDLE(bus->bridge))
		return;

	acpi_pci_slot_enumerate(bus);
	acpiphp_enumerate_slots(bus);

	/*
	 * For a host bridge, check its _DSM for function 8 and if
	 * that is available, mark it in pci_host_bridge.
	 */
	if (!pci_is_root_bus(bus))
		return;

	obj = acpi_evaluate_dsm_typed(ACPI_HANDLE(bus->bridge), &pci_acpi_dsm_guid, 3,
				      DSM_PCI_POWER_ON_RESET_DELAY, NULL,
				      ACPI_TYPE_INTEGER);
	if (!obj)
		return;

	if (obj->integer.value == 1) {
		bridge = pci_find_host_bridge(bus);
		bridge->ignore_reset_delay = 1;
	}
	ACPI_FREE(obj);
}

void acpi_pci_remove_bus(struct pci_bus *bus)
{
	if (acpi_pci_disabled || !bus->bridge)
		return;

	acpiphp_remove_slots(bus);
	acpi_pci_slot_remove(bus);
}

/* ACPI bus type */

static DECLARE_RWSEM(pci_acpi_companion_lookup_sem);
static struct acpi_device *(*pci_acpi_find_companion_hook)(struct pci_dev *);

/**
 * pci_acpi_set_companion_lookup_hook - Set ACPI companion lookup callback.
 * @func: ACPI companion lookup callback pointer or NULL.
 *
 * Set a special ACPI companion lookup callback for PCI devices whose companion
 * objects in the ACPI namespace have _ADR with non-standard bus-device-function
 * encodings.
 *
 * Return 0 on success or a negative error code on failure (in which case no
 * changes are made).
 *
 * The caller is responsible for the appropriate ordering of the invocations of
 * this function with respect to the enumeration of the PCI devices needing the
 * callback installed by it.
 */
int pci_acpi_set_companion_lookup_hook(struct acpi_device *(*func)(struct pci_dev *))
{
	int ret;

	if (!func)
		return -EINVAL;

	down_write(&pci_acpi_companion_lookup_sem);

	if (pci_acpi_find_companion_hook) {
		ret = -EBUSY;
	} else {
		pci_acpi_find_companion_hook = func;
		ret = 0;
	}

	up_write(&pci_acpi_companion_lookup_sem);

	return ret;
}
EXPORT_SYMBOL_GPL(pci_acpi_set_companion_lookup_hook);

/**
 * pci_acpi_clear_companion_lookup_hook - Clear ACPI companion lookup callback.
 *
 * Clear the special ACPI companion lookup callback previously set by
 * pci_acpi_set_companion_lookup_hook().  Block until the last running instance
 * of the callback returns before clearing it.
 *
 * The caller is responsible for the appropriate ordering of the invocations of
 * this function with respect to the enumeration of the PCI devices needing the
 * callback cleared by it.
 */
void pci_acpi_clear_companion_lookup_hook(void)
{
	down_write(&pci_acpi_companion_lookup_sem);

	pci_acpi_find_companion_hook = NULL;

	up_write(&pci_acpi_companion_lookup_sem);
}
EXPORT_SYMBOL_GPL(pci_acpi_clear_companion_lookup_hook);
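
/*
 * Illustrative use of the companion lookup hook (a sketch;
 * my_find_companion() is an invented name -- the in-tree user is the
 * VMD driver):
 *
 *	static struct acpi_device *my_find_companion(struct pci_dev *pdev)
 *	{
 *		// map pdev to its ACPI object using a custom _ADR scheme
 *	}
 *
 *	pci_acpi_set_companion_lookup_hook(my_find_companion);
 *	// ...enumerate the devices that need it...
 *	pci_acpi_clear_companion_lookup_hook();
 */
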
static struct acpi_device *acpi_pci_find_companion(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct acpi_device *adev;
	bool check_children;
	u64 addr;

	if (!dev->parent)
		return NULL;

	down_read(&pci_acpi_companion_lookup_sem);

	adev = pci_acpi_find_companion_hook ?
		pci_acpi_find_companion_hook(pci_dev) : NULL;

	up_read(&pci_acpi_companion_lookup_sem);

	if (adev)
		return adev;

	check_children = pci_is_bridge(pci_dev);
	/* See the ACPI spec for the encoding of _ADR */
	addr = (PCI_SLOT(pci_dev->devfn) << 16) | PCI_FUNC(pci_dev->devfn);
	adev = acpi_find_child_device(ACPI_COMPANION(dev->parent), addr,
				      check_children);

	/*
	 * There may be ACPI device objects in the ACPI namespace that are
	 * children of the device object representing the host bridge, but
	 * don't represent PCI devices.  Both _HID and _ADR may be present
	 * for them, even though that is against the specification (for
	 * example, see Section 6.1 of ACPI 6.3), but in many cases the _ADR
	 * returns 0 which appears to indicate that they should not be taken
	 * into consideration as potential companions of PCI devices on the
	 * root bus.
	 *
	 * To catch this special case, disregard the returned device object
	 * if it has a valid _HID, addr is 0 and the PCI device at hand is
	 * on the root bus.
	 */
	if (adev && adev->pnp.type.platform_id && !addr &&
	    pci_is_root_bus(pci_dev->bus))
		return NULL;

	return adev;
}
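
/*
 * _ADR encoding example: device 0x1f, function 3 yields
 * (0x1f << 16) | 3 == 0x001F0003.  Per the ACPI spec, a function field
 * of 0xFFFF matches all functions of the device.
 */
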
/**
 * pci_acpi_optimize_delay - optimize PCI D3 and D3cold delay from ACPI
 * @pdev: the PCI device whose delay is to be updated
 * @handle: ACPI handle of this device
 *
 * Update the d3hot_delay and d3cold_delay of a PCI device from the ACPI _DSM
 * control method of either the device itself or the PCI host bridge.
 *
 * Function 8, "Reset Delay," applies to the entire hierarchy below a PCI
 * host bridge.  If it returns one, the OS may assume that all devices in
 * the hierarchy have already completed power-on reset delays.
 *
 * Function 9, "Device Readiness Durations," applies only to the object
 * where it is located.  It returns delay durations required after various
 * events if the device requires less time than the spec requires.  Delays
 * from this function take precedence over the Reset Delay function.
 *
 * These _DSM functions are defined by the draft ECN of January 28, 2014,
 * titled "ACPI additions for FW latency optimizations."
 */
static void pci_acpi_optimize_delay(struct pci_dev *pdev,
				    acpi_handle handle)
{
	struct pci_host_bridge *bridge = pci_find_host_bridge(pdev->bus);
	int value;
	union acpi_object *obj, *elements;

	if (bridge->ignore_reset_delay)
		pdev->d3cold_delay = 0;

	obj = acpi_evaluate_dsm_typed(handle, &pci_acpi_dsm_guid, 3,
				      DSM_PCI_DEVICE_READINESS_DURATIONS, NULL,
				      ACPI_TYPE_PACKAGE);
	if (!obj)
		return;

	if (obj->package.count == 5) {
		elements = obj->package.elements;
		if (elements[0].type == ACPI_TYPE_INTEGER) {
			value = (int)elements[0].integer.value / 1000;
			if (value < PCI_PM_D3COLD_WAIT)
				pdev->d3cold_delay = value;
		}
		if (elements[3].type == ACPI_TYPE_INTEGER) {
			value = (int)elements[3].integer.value / 1000;
			if (value < PCI_PM_D3HOT_WAIT)
				pdev->d3hot_delay = value;
		}
	}
	ACPI_FREE(obj);
}
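
/*
 * Illustrative Function 9 return package (invented values, durations in
 * microseconds): { 50000, 0, 0, 5000, 0 } caps d3cold_delay at 50 ms
 * and d3hot_delay at 5 ms; elements 1, 2 and 4 are not consumed here.
 */
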
static void pci_acpi_set_external_facing(struct pci_dev *dev)
{
	u8 val;

	if (pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
		return;
	if (device_property_read_u8(&dev->dev, "ExternalFacingPort", &val))
		return;

	/*
	 * These root ports expose PCIe (including DMA) outside of the
	 * system.  Everything downstream from them is external.
	 */
	if (val)
		dev->external_facing = 1;
}
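
/*
 * Illustrative _DSD advertising "ExternalFacingPort" on a Root Port
 * (a sketch; the UUID is the standard _DSD device-properties UUID, and
 * the property is described in Microsoft's DMA-protection docs):
 *
 *	Name (_DSD, Package () {
 *		ToUUID ("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"),
 *		Package () {
 *			Package () { "ExternalFacingPort", 1 }
 *		}
 *	})
 */
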
void pci_acpi_setup(struct device *dev, struct acpi_device *adev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);

	pci_acpi_optimize_delay(pci_dev, adev->handle);
	pci_acpi_set_external_facing(pci_dev);
	pci_acpi_add_edr_notifier(pci_dev);

	pci_acpi_add_pm_notifier(adev, pci_dev);
	if (!adev->wakeup.flags.valid)
		return;

	device_set_wakeup_capable(dev, true);
	/*
	 * For bridges that can do D3 we enable wake automatically (as
	 * we do for the power management itself in that case).  The
	 * reason is that the bridge may have additional methods such as
	 * _DSW that need to be called.
	 */
	if (pci_dev->bridge_d3)
		device_wakeup_enable(dev);

	acpi_pci_wakeup(pci_dev, false);
	acpi_device_power_add_dependent(adev, dev);

	if (pci_is_bridge(pci_dev))
		acpi_dev_power_up_children_with_adr(adev);
}

void pci_acpi_cleanup(struct device *dev, struct acpi_device *adev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);

	pci_acpi_remove_edr_notifier(pci_dev);
	pci_acpi_remove_pm_notifier(adev);
	if (adev->wakeup.flags.valid) {
		acpi_device_power_remove_dependent(adev, dev);
		if (pci_dev->bridge_d3)
			device_wakeup_disable(dev);

		device_set_wakeup_capable(dev, false);
	}
}

static struct fwnode_handle *(*pci_msi_get_fwnode_cb)(struct device *dev);

/**
 * pci_msi_register_fwnode_provider - Register callback to retrieve fwnode
 * @fn: Callback matching a device to a fwnode that identifies a PCI
 * MSI domain.
 *
 * This should be called by an irqchip driver, which is the parent of
 * the MSI domain, to provide a callback interface to query the fwnode.
 */
void
pci_msi_register_fwnode_provider(struct fwnode_handle *(*fn)(struct device *))
{
	pci_msi_get_fwnode_cb = fn;
}

/**
 * pci_host_bridge_acpi_msi_domain - Retrieve MSI domain of a PCI host bridge
 * @bus: The PCI host bridge bus.
 *
 * This function uses the callback function registered by
 * pci_msi_register_fwnode_provider() to retrieve the irq_domain with
 * type DOMAIN_BUS_PCI_MSI of the specified host bridge bus.
 * This returns NULL on error or when the domain is not found.
 */
struct irq_domain *pci_host_bridge_acpi_msi_domain(struct pci_bus *bus)
{
	struct fwnode_handle *fwnode;

	if (!pci_msi_get_fwnode_cb)
		return NULL;

	fwnode = pci_msi_get_fwnode_cb(&bus->dev);
	if (!fwnode)
		return NULL;

	return irq_find_matching_fwnode(fwnode, DOMAIN_BUS_PCI_MSI);
}

static int __init acpi_pci_init(void)
{
	if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_MSI) {
		pr_info("ACPI FADT declares the system doesn't support MSI, so disabling it\n");
		pci_no_msi();
	}

	if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_ASPM) {
		pr_info("ACPI FADT declares the system doesn't support PCIe ASPM, so disabling it\n");
		pcie_no_aspm();
	}

	if (acpi_pci_disabled)
		return 0;

	acpi_pci_slot_init();
	acpiphp_init();

	return 0;
}
arch_initcall(acpi_pci_init);

#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)

/*
 * Try to assign the IRQ number when probing a new device
 */
int pcibios_alloc_irq(struct pci_dev *dev)
{
	if (!acpi_disabled)
		acpi_pci_irq_enable(dev);

	return 0;
}

struct acpi_pci_generic_root_info {
	struct acpi_pci_root_info	common;
	struct pci_config_window	*cfg;	/* config space mapping */
};

int acpi_pci_bus_find_domain_nr(struct pci_bus *bus)
{
	struct pci_config_window *cfg = bus->sysdata;
	struct acpi_device *adev = to_acpi_device(cfg->parent);
	struct acpi_pci_root *root = acpi_driver_data(adev);

	return root->segment;
}

int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
{
	struct pci_config_window *cfg;
	struct acpi_device *adev;
	struct device *bus_dev;

	if (acpi_disabled)
		return 0;

	cfg = bridge->bus->sysdata;

	/*
	 * On Hyper-V there is no corresponding ACPI device for a root
	 * bridge, so the driver sets ->parent to NULL; leave 'adev' NULL
	 * in that case because there is no proper ACPI device.
	 */
	if (!cfg->parent)
		adev = NULL;
	else
		adev = to_acpi_device(cfg->parent);

	bus_dev = &bridge->bus->dev;

	ACPI_COMPANION_SET(&bridge->dev, adev);
	set_dev_node(bus_dev, acpi_get_node(acpi_device_handle(adev)));

	return 0;
}

static int pci_acpi_root_prepare_resources(struct acpi_pci_root_info *ci)
{
	struct resource_entry *entry, *tmp;
	int status;

	status = acpi_pci_probe_root_resources(ci);
	resource_list_for_each_entry_safe(entry, tmp, &ci->resources) {
		if (!(entry->res->flags & IORESOURCE_WINDOW))
			resource_list_destroy_entry(entry);
	}
	return status;
}

/*
 * Look up the bus range for the domain in MCFG, and set up config space
 * mapping.
 */
static struct pci_config_window *
pci_acpi_setup_ecam_mapping(struct acpi_pci_root *root)
{
	struct device *dev = &root->device->dev;
	struct resource *bus_res = &root->secondary;
	u16 seg = root->segment;
	const struct pci_ecam_ops *ecam_ops;
	struct resource cfgres;
	struct acpi_device *adev;
	struct pci_config_window *cfg;
	int ret;

	ret = pci_mcfg_lookup(root, &cfgres, &ecam_ops);
	if (ret) {
		dev_err(dev, "%04x:%pR ECAM region not found\n", seg, bus_res);
		return NULL;
	}

	adev = acpi_resource_consumer(&cfgres);
	if (adev)
		dev_info(dev, "ECAM area %pR reserved by %s\n", &cfgres,
			 dev_name(&adev->dev));
	else
		dev_warn(dev, FW_BUG "ECAM area %pR not reserved in ACPI namespace\n",
			 &cfgres);

	cfg = pci_ecam_create(dev, &cfgres, bus_res, ecam_ops);
	if (IS_ERR(cfg)) {
		dev_err(dev, "%04x:%pR error %ld mapping ECAM\n", seg, bus_res,
			PTR_ERR(cfg));
		return NULL;
	}

	return cfg;
}

/* release_info: free resources allocated by init_info */
static void pci_acpi_generic_release_info(struct acpi_pci_root_info *ci)
{
	struct acpi_pci_generic_root_info *ri;

	ri = container_of(ci, struct acpi_pci_generic_root_info, common);
	pci_ecam_free(ri->cfg);
	kfree(ci->ops);
	kfree(ri);
}

/* Interface called from ACPI code to setup PCI host controller */
struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
{
	struct acpi_pci_generic_root_info *ri;
	struct pci_bus *bus, *child;
	struct acpi_pci_root_ops *root_ops;
	struct pci_host_bridge *host;

	ri = kzalloc(sizeof(*ri), GFP_KERNEL);
	if (!ri)
		return NULL;

	root_ops = kzalloc(sizeof(*root_ops), GFP_KERNEL);
	if (!root_ops) {
		kfree(ri);
		return NULL;
	}

	ri->cfg = pci_acpi_setup_ecam_mapping(root);
	if (!ri->cfg) {
		kfree(ri);
		kfree(root_ops);
		return NULL;
	}

	root_ops->release_info = pci_acpi_generic_release_info;
	root_ops->prepare_resources = pci_acpi_root_prepare_resources;
	root_ops->pci_ops = (struct pci_ops *)&ri->cfg->ops->pci_ops;
	bus = acpi_pci_root_create(root, root_ops, &ri->common, ri->cfg);
	if (!bus)
		return NULL;

	/* If we must preserve the resource configuration, claim now */
	host = pci_find_host_bridge(bus);
	if (host->preserve_config)
		pci_bus_claim_resources(bus);

	/*
	 * Assign whatever was left unassigned.  If we didn't claim above,
	 * this will reassign everything.
	 */
	pci_assign_unassigned_root_bus_resources(bus);

	list_for_each_entry(child, &bus->children, node)
		pcie_bus_configure_settings(child);

	return bus;
}

void pcibios_add_bus(struct pci_bus *bus)
{
	acpi_pci_add_bus(bus);
}

void pcibios_remove_bus(struct pci_bus *bus)
{
	acpi_pci_remove_bus(bus);
}

#endif