// SPDX-License-Identifier: GPL-2.0
/*
 * PCI support in ACPI
 *
 * Copyright (C) 2005 David Shaohua Li <shaohua.li@intel.com>
 * Copyright (C) 2004 Tom Long Nguyen <tom.l.nguyen@intel.com>
 * Copyright (C) 2004 Intel Corp.
 */

#include <linux/delay.h>
#include <linux/init.h>
#include <linux/irqdomain.h>
#include <linux/pci.h>
#include <linux/msi.h>
#include <linux/pci_hotplug.h>
#include <linux/module.h>
#include <linux/pci-aspm.h>
#include <linux/pci-acpi.h>
#include <linux/pm_runtime.h>
#include <linux/pm_qos.h>
#include "pci.h"

/*
 * The GUID is defined in the PCI Firmware Specification available here:
 * https://www.pcisig.com/members/downloads/pcifw_r3_1_13Dec10.pdf
 */
const guid_t pci_acpi_dsm_guid =
        GUID_INIT(0xe5c937d0, 0x3553, 0x4d7a,
                  0x91, 0x17, 0xea, 0x4d, 0x19, 0xc3, 0x43, 0x4d);

#if defined(CONFIG_PCI_QUIRKS) && defined(CONFIG_ARM64)
static int acpi_get_rc_addr(struct acpi_device *adev, struct resource *res)
{
        struct device *dev = &adev->dev;
        struct resource_entry *entry;
        struct list_head list;
        unsigned long flags;
        int ret;

        INIT_LIST_HEAD(&list);
        flags = IORESOURCE_MEM;
        ret = acpi_dev_get_resources(adev, &list,
                                     acpi_dev_filter_resource_type_cb,
                                     (void *) flags);
        if (ret < 0) {
                dev_err(dev, "failed to parse _CRS method, error code %d\n",
                        ret);
                return ret;
        }

        if (ret == 0) {
                dev_err(dev, "no IO and memory resources present in _CRS\n");
                return -EINVAL;
        }

        entry = list_first_entry(&list, struct resource_entry, node);
        *res = *entry->res;
        acpi_dev_free_resource_list(&list);
        return 0;
}

static acpi_status acpi_match_rc(acpi_handle handle, u32 lvl, void *context,
                                 void **retval)
{
        u16 *segment = context;
        unsigned long long uid;
        acpi_status status;

        status = acpi_evaluate_integer(handle, "_UID", NULL, &uid);
        if (ACPI_FAILURE(status) || uid != *segment)
                return AE_CTRL_DEPTH;

        *(acpi_handle *)retval = handle;
        return AE_CTRL_TERMINATE;
}

int acpi_get_rc_resources(struct device *dev, const char *hid, u16 segment,
                          struct resource *res)
{
        struct acpi_device *adev;
        acpi_status status;
        acpi_handle handle;
        int ret;

        status = acpi_get_devices(hid, acpi_match_rc, &segment, &handle);
        if (ACPI_FAILURE(status)) {
                dev_err(dev, "can't find _HID %s device to locate resources\n",
                        hid);
                return -ENODEV;
        }

        ret = acpi_bus_get_device(handle, &adev);
        if (ret)
                return ret;

        ret = acpi_get_rc_addr(adev, res);
        if (ret) {
                dev_err(dev, "can't get resource from %s\n",
                        dev_name(&adev->dev));
                return ret;
        }

        return 0;
}
#endif

phys_addr_t acpi_pci_root_get_mcfg_addr(acpi_handle handle)
{
        acpi_status status = AE_NOT_EXIST;
        unsigned long long mcfg_addr;

        if (handle)
                status = acpi_evaluate_integer(handle, METHOD_NAME__CBA,
                                               NULL, &mcfg_addr);
        if (ACPI_FAILURE(status))
                return 0;

        return (phys_addr_t)mcfg_addr;
}
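
/*
 * For orientation: a Type 0 _HPX setting record, as validated by
 * decode_type0_hpx_record() below, is a package of six integers.  An
 * illustrative ASL sketch (field values are made up; see the ACPI spec
 * for the authoritative definition):
 *
 *      Package () {
 *              0x00,   // Type 0 setting record
 *              0x01,   // Revision 1
 *              0x10,   // Cache line size
 *              0x40,   // Latency timer
 *              One,    // Enable SERR
 *              One,    // Enable PERR
 *      }
 */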

static acpi_status decode_type0_hpx_record(union acpi_object *record,
                                           struct hpp_type0 *hpx0)
{
        int i;
        union acpi_object *fields = record->package.elements;
        u32 revision = fields[1].integer.value;

        switch (revision) {
        case 1:
                if (record->package.count != 6)
                        return AE_ERROR;
                for (i = 2; i < 6; i++)
                        if (fields[i].type != ACPI_TYPE_INTEGER)
                                return AE_ERROR;
                hpx0->revision        = revision;
                hpx0->cache_line_size = fields[2].integer.value;
                hpx0->latency_timer   = fields[3].integer.value;
                hpx0->enable_serr     = fields[4].integer.value;
                hpx0->enable_perr     = fields[5].integer.value;
                break;
        default:
                pr_warn("%s: Type 0 Revision %d record not supported\n",
                        __func__, revision);
                return AE_ERROR;
        }
        return AE_OK;
}

static acpi_status decode_type1_hpx_record(union acpi_object *record,
                                           struct hpp_type1 *hpx1)
{
        int i;
        union acpi_object *fields = record->package.elements;
        u32 revision = fields[1].integer.value;

        switch (revision) {
        case 1:
                if (record->package.count != 5)
                        return AE_ERROR;
                for (i = 2; i < 5; i++)
                        if (fields[i].type != ACPI_TYPE_INTEGER)
                                return AE_ERROR;
                hpx1->revision      = revision;
                hpx1->max_mem_read  = fields[2].integer.value;
                hpx1->avg_max_split = fields[3].integer.value;
                hpx1->tot_max_split = fields[4].integer.value;
                break;
        default:
                pr_warn("%s: Type 1 Revision %d record not supported\n",
                        __func__, revision);
                return AE_ERROR;
        }
        return AE_OK;
}

static acpi_status decode_type2_hpx_record(union acpi_object *record,
                                           struct hpp_type2 *hpx2)
{
        int i;
        union acpi_object *fields = record->package.elements;
        u32 revision = fields[1].integer.value;

        switch (revision) {
        case 1:
                if (record->package.count != 18)
                        return AE_ERROR;
                for (i = 2; i < 18; i++)
                        if (fields[i].type != ACPI_TYPE_INTEGER)
                                return AE_ERROR;
                hpx2->revision              = revision;
                hpx2->unc_err_mask_and      = fields[2].integer.value;
                hpx2->unc_err_mask_or       = fields[3].integer.value;
                hpx2->unc_err_sever_and     = fields[4].integer.value;
                hpx2->unc_err_sever_or      = fields[5].integer.value;
                hpx2->cor_err_mask_and      = fields[6].integer.value;
                hpx2->cor_err_mask_or       = fields[7].integer.value;
                hpx2->adv_err_cap_and       = fields[8].integer.value;
                hpx2->adv_err_cap_or        = fields[9].integer.value;
                hpx2->pci_exp_devctl_and    = fields[10].integer.value;
                hpx2->pci_exp_devctl_or     = fields[11].integer.value;
                hpx2->pci_exp_lnkctl_and    = fields[12].integer.value;
                hpx2->pci_exp_lnkctl_or     = fields[13].integer.value;
                hpx2->sec_unc_err_sever_and = fields[14].integer.value;
                hpx2->sec_unc_err_sever_or  = fields[15].integer.value;
                hpx2->sec_unc_err_mask_and  = fields[16].integer.value;
                hpx2->sec_unc_err_mask_or   = fields[17].integer.value;
                break;
        default:
                pr_warn("%s: Type 2 Revision %d record not supported\n",
                        __func__, revision);
                return AE_ERROR;
        }
        return AE_OK;
}

static void parse_hpx3_register(struct hpx_type3 *hpx3_reg,
                                union acpi_object *reg_fields)
{
        hpx3_reg->device_type            = reg_fields[0].integer.value;
        hpx3_reg->function_type          = reg_fields[1].integer.value;
        hpx3_reg->config_space_location  = reg_fields[2].integer.value;
        hpx3_reg->pci_exp_cap_id         = reg_fields[3].integer.value;
        hpx3_reg->pci_exp_cap_ver        = reg_fields[4].integer.value;
        hpx3_reg->pci_exp_vendor_id      = reg_fields[5].integer.value;
        hpx3_reg->dvsec_id               = reg_fields[6].integer.value;
        hpx3_reg->dvsec_rev              = reg_fields[7].integer.value;
        hpx3_reg->match_offset           = reg_fields[8].integer.value;
        hpx3_reg->match_mask_and         = reg_fields[9].integer.value;
        hpx3_reg->match_value            = reg_fields[10].integer.value;
        hpx3_reg->reg_offset             = reg_fields[11].integer.value;
        hpx3_reg->reg_mask_and           = reg_fields[12].integer.value;
        hpx3_reg->reg_mask_or            = reg_fields[13].integer.value;
}
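
/*
 * A Type 3 _HPX setting record consists of a three-integer header (type,
 * revision, descriptor count) followed by "descriptor count" register
 * descriptors of 14 integers each -- the 14 fields unpacked by
 * parse_hpx3_register() above.  program_type3_hpx_record() below
 * validates the package against exactly that shape: 3 + count * 14
 * elements.
 */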

static acpi_status program_type3_hpx_record(struct pci_dev *dev,
                                            union acpi_object *record,
                                            const struct hotplug_program_ops *hp_ops)
{
        union acpi_object *fields = record->package.elements;
        u32 desc_count, expected_length, revision;
        union acpi_object *reg_fields;
        struct hpx_type3 hpx3;
        int i;

        revision = fields[1].integer.value;
        switch (revision) {
        case 1:
                desc_count = fields[2].integer.value;
                expected_length = 3 + desc_count * 14;

                if (record->package.count != expected_length)
                        return AE_ERROR;

                for (i = 2; i < expected_length; i++)
                        if (fields[i].type != ACPI_TYPE_INTEGER)
                                return AE_ERROR;

                for (i = 0; i < desc_count; i++) {
                        reg_fields = fields + 3 + i * 14;
                        parse_hpx3_register(&hpx3, reg_fields);
                        hp_ops->program_type3(dev, &hpx3);
                }

                break;
        default:
                pr_warn("%s: Type 3 Revision %d record not supported\n",
                        __func__, revision);
                return AE_ERROR;
        }
        return AE_OK;
}

static acpi_status acpi_run_hpx(struct pci_dev *dev, acpi_handle handle,
                                const struct hotplug_program_ops *hp_ops)
{
        acpi_status status;
        struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
        union acpi_object *package, *record, *fields;
        struct hpp_type0 hpx0;
        struct hpp_type1 hpx1;
        struct hpp_type2 hpx2;
        u32 type;
        int i;

        status = acpi_evaluate_object(handle, "_HPX", NULL, &buffer);
        if (ACPI_FAILURE(status))
                return status;

        package = (union acpi_object *)buffer.pointer;
        if (package->type != ACPI_TYPE_PACKAGE) {
                status = AE_ERROR;
                goto exit;
        }

        for (i = 0; i < package->package.count; i++) {
                record = &package->package.elements[i];
                if (record->type != ACPI_TYPE_PACKAGE) {
                        status = AE_ERROR;
                        goto exit;
                }

                fields = record->package.elements;
                if (fields[0].type != ACPI_TYPE_INTEGER ||
                    fields[1].type != ACPI_TYPE_INTEGER) {
                        status = AE_ERROR;
                        goto exit;
                }

                type = fields[0].integer.value;
                switch (type) {
                case 0:
                        memset(&hpx0, 0, sizeof(hpx0));
                        status = decode_type0_hpx_record(record, &hpx0);
                        if (ACPI_FAILURE(status))
                                goto exit;
                        hp_ops->program_type0(dev, &hpx0);
                        break;
                case 1:
                        memset(&hpx1, 0, sizeof(hpx1));
                        status = decode_type1_hpx_record(record, &hpx1);
                        if (ACPI_FAILURE(status))
                                goto exit;
                        hp_ops->program_type1(dev, &hpx1);
                        break;
                case 2:
                        memset(&hpx2, 0, sizeof(hpx2));
                        status = decode_type2_hpx_record(record, &hpx2);
                        if (ACPI_FAILURE(status))
                                goto exit;
                        hp_ops->program_type2(dev, &hpx2);
                        break;
                case 3:
                        status = program_type3_hpx_record(dev, record, hp_ops);
                        if (ACPI_FAILURE(status))
                                goto exit;
                        break;
                default:
                        pr_err("%s: Type %d record not supported\n",
                               __func__, type);
                        status = AE_ERROR;
                        goto exit;
                }
        }
exit:
        kfree(buffer.pointer);
        return status;
}
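
/*
 * _HPP is the older, pre-PCIe sibling of _HPX: a bare package of four
 * integers with no type/revision header.  acpi_run_hpp() below maps it
 * onto a Type 0 setting record with the revision hard-coded to 1.
 */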

static acpi_status acpi_run_hpp(struct pci_dev *dev, acpi_handle handle,
                                const struct hotplug_program_ops *hp_ops)
{
        acpi_status status;
        struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
        union acpi_object *package, *fields;
        struct hpp_type0 hpp0;
        int i;

        memset(&hpp0, 0, sizeof(hpp0));

        status = acpi_evaluate_object(handle, "_HPP", NULL, &buffer);
        if (ACPI_FAILURE(status))
                return status;

        package = (union acpi_object *) buffer.pointer;
        if (package->type != ACPI_TYPE_PACKAGE ||
            package->package.count != 4) {
                status = AE_ERROR;
                goto exit;
        }

        fields = package->package.elements;
        for (i = 0; i < 4; i++) {
                if (fields[i].type != ACPI_TYPE_INTEGER) {
                        status = AE_ERROR;
                        goto exit;
                }
        }

        hpp0.revision        = 1;
        hpp0.cache_line_size = fields[0].integer.value;
        hpp0.latency_timer   = fields[1].integer.value;
        hpp0.enable_serr     = fields[2].integer.value;
        hpp0.enable_perr     = fields[3].integer.value;

        hp_ops->program_type0(dev, &hpp0);

exit:
        kfree(buffer.pointer);
        return status;
}

/**
 * pci_acpi_program_hp_params - program hotplug parameters for a device
 * @dev: the pci_dev for which we want parameters
 * @hp_ops: hotplug operations used to program each type of setting record
 */
int pci_acpi_program_hp_params(struct pci_dev *dev,
                               const struct hotplug_program_ops *hp_ops)
{
        acpi_status status;
        acpi_handle handle, phandle;
        struct pci_bus *pbus;

        if (acpi_pci_disabled)
                return -ENODEV;

        handle = NULL;
        for (pbus = dev->bus; pbus; pbus = pbus->parent) {
                handle = acpi_pci_get_bridge_handle(pbus);
                if (handle)
                        break;
        }

        /*
         * _HPP settings apply to all child buses, until another _HPP is
         * encountered.  If we don't find an _HPP for the input pci dev,
         * look for it in the parent device scope since that would apply to
         * this pci dev.
         */
        while (handle) {
                status = acpi_run_hpx(dev, handle, hp_ops);
                if (ACPI_SUCCESS(status))
                        return 0;
                status = acpi_run_hpp(dev, handle, hp_ops);
                if (ACPI_SUCCESS(status))
                        return 0;
                if (acpi_is_root_bridge(handle))
                        break;
                status = acpi_get_parent(handle, &phandle);
                if (ACPI_FAILURE(status))
                        break;
                handle = phandle;
        }
        return -ENODEV;
}

/**
 * pciehp_is_native - Check whether a hotplug port is handled by the OS
 * @bridge: Hotplug port to check
 *
 * Returns true if the given @bridge is handled by the native PCIe hotplug
 * driver.
 */
bool pciehp_is_native(struct pci_dev *bridge)
{
        const struct pci_host_bridge *host;
        u32 slot_cap;

        if (!IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE))
                return false;

        pcie_capability_read_dword(bridge, PCI_EXP_SLTCAP, &slot_cap);
        if (!(slot_cap & PCI_EXP_SLTCAP_HPC))
                return false;

        if (pcie_ports_native)
                return true;

        host = pci_find_host_bridge(bridge->bus);
        return host->native_pcie_hotplug;
}

/**
 * shpchp_is_native - Check whether a hotplug port is handled by the OS
 * @bridge: Hotplug port to check
 *
 * Returns true if the given @bridge is handled by the native SHPC hotplug
 * driver.
 */
bool shpchp_is_native(struct pci_dev *bridge)
{
        return bridge->shpc_managed;
}

/**
 * pci_acpi_wake_bus - Root bus wakeup notification work function.
 * @context: Device wakeup context.
 */
static void pci_acpi_wake_bus(struct acpi_device_wakeup_context *context)
{
        struct acpi_device *adev;
        struct acpi_pci_root *root;

        adev = container_of(context, struct acpi_device, wakeup.context);
        root = acpi_driver_data(adev);
        pci_pme_wakeup_bus(root->bus);
}
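
/*
 * Note the asymmetry with pci_acpi_wake_bus() above: the per-device
 * handler below must cope with a device in D3cold, where config space
 * is inaccessible.  In that case it only signals a wakeup event and
 * requests a runtime resume instead of clearing PME Status directly.
 */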

/**
 * pci_acpi_wake_dev - PCI device wakeup notification work function.
 * @context: Device wakeup context.
 */
static void pci_acpi_wake_dev(struct acpi_device_wakeup_context *context)
{
        struct pci_dev *pci_dev;

        pci_dev = to_pci_dev(context->dev);

        if (pci_dev->pme_poll)
                pci_dev->pme_poll = false;

        if (pci_dev->current_state == PCI_D3cold) {
                pci_wakeup_event(pci_dev);
                pm_request_resume(&pci_dev->dev);
                return;
        }

        /* Clear PME Status if set. */
        if (pci_dev->pme_support)
                pci_check_pme_status(pci_dev);

        pci_wakeup_event(pci_dev);
        pm_request_resume(&pci_dev->dev);

        pci_pme_wakeup_bus(pci_dev->subordinate);
}

/**
 * pci_acpi_add_bus_pm_notifier - Register PM notifier for root PCI bus.
 * @dev: PCI root bridge ACPI device.
 */
acpi_status pci_acpi_add_bus_pm_notifier(struct acpi_device *dev)
{
        return acpi_add_pm_notifier(dev, NULL, pci_acpi_wake_bus);
}

/**
 * pci_acpi_add_pm_notifier - Register PM notifier for given PCI device.
 * @dev: ACPI device to add the notifier for.
 * @pci_dev: PCI device to check for the PME status if an event is signaled.
 */
acpi_status pci_acpi_add_pm_notifier(struct acpi_device *dev,
                                     struct pci_dev *pci_dev)
{
        return acpi_add_pm_notifier(dev, &pci_dev->dev, pci_acpi_wake_dev);
}

/*
 * _SxD returns the D-state with the highest power
 * (lowest D-state number) supported in the S-state "x".
 *
 * If the device does not have a _PRW
 * (Power Resources for Wake) supporting system wakeup from "x"
 * then the OS is free to choose a lower power (higher number
 * D-state) than the return value from _SxD.
 *
 * But if _PRW is enabled at S-state "x", the OS
 * must not choose a power lower than _SxD --
 * unless the device has an _SxW method specifying
 * the lowest power (highest D-state number) the device
 * may enter while still able to wake the system.
 *
 * i.e., depending on global OS policy:
 *
 * if (_PRW at S-state x)
 *	choose from highest power _SxD to lowest power _SxW
 * else // no _PRW at S-state x
 *	choose highest power _SxD or any lower power
 */

static pci_power_t acpi_pci_choose_state(struct pci_dev *pdev)
{
        int acpi_state, d_max;

        if (pdev->no_d3cold)
                d_max = ACPI_STATE_D3_HOT;
        else
                d_max = ACPI_STATE_D3_COLD;
        acpi_state = acpi_pm_device_sleep_state(&pdev->dev, NULL, d_max);
        if (acpi_state < 0)
                return PCI_POWER_ERROR;

        switch (acpi_state) {
        case ACPI_STATE_D0:
                return PCI_D0;
        case ACPI_STATE_D1:
                return PCI_D1;
        case ACPI_STATE_D2:
                return PCI_D2;
        case ACPI_STATE_D3_HOT:
                return PCI_D3hot;
        case ACPI_STATE_D3_COLD:
                return PCI_D3cold;
        }
        return PCI_POWER_ERROR;
}

static struct acpi_device *acpi_pci_find_companion(struct device *dev);
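
/*
 * acpi_pci_bridge_d3() below keys off the "HotPlugSupportInD3" _DSD
 * property of the root port.  An illustrative ASL sketch of how firmware
 * typically exposes it (the property name is what the code matches; the
 * packaging uses the generic _DSD device-properties UUID):
 *
 *      Name (_DSD, Package () {
 *              ToUUID ("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"),
 *              Package () {
 *                      Package () { "HotPlugSupportInD3", 1 }
 *              }
 *      })
 */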

static bool acpi_pci_bridge_d3(struct pci_dev *dev)
{
        const struct fwnode_handle *fwnode;
        struct acpi_device *adev;
        struct pci_dev *root;
        u8 val;

        if (!dev->is_hotplug_bridge)
                return false;

        /*
         * Look for a special _DSD property for the root port and if it
         * is set we know the hierarchy behind it supports D3 just fine.
         */
        root = pci_find_pcie_root_port(dev);
        if (!root)
                return false;

        adev = ACPI_COMPANION(&root->dev);
        if (root == dev) {
                /*
                 * It is possible that the ACPI companion is not yet bound
                 * for the root port so look it up manually here.
                 */
                if (!adev && !pci_dev_is_added(root))
                        adev = acpi_pci_find_companion(&root->dev);
        }

        if (!adev)
                return false;

        fwnode = acpi_fwnode_handle(adev);
        if (fwnode_property_read_u8(fwnode, "HotPlugSupportInD3", &val))
                return false;

        return val == 1;
}

static bool acpi_pci_power_manageable(struct pci_dev *dev)
{
        struct acpi_device *adev = ACPI_COMPANION(&dev->dev);

        return adev ? acpi_device_power_manageable(adev) : false;
}

static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
        struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
        static const u8 state_conv[] = {
                [PCI_D0] = ACPI_STATE_D0,
                [PCI_D1] = ACPI_STATE_D1,
                [PCI_D2] = ACPI_STATE_D2,
                [PCI_D3hot] = ACPI_STATE_D3_HOT,
                [PCI_D3cold] = ACPI_STATE_D3_COLD,
        };
        int error = -EINVAL;

        /* If the ACPI device has _EJ0, ignore the device */
        if (!adev || acpi_has_method(adev->handle, "_EJ0"))
                return -ENODEV;

        switch (state) {
        case PCI_D3cold:
                if (dev_pm_qos_flags(&dev->dev, PM_QOS_FLAG_NO_POWER_OFF) ==
                                PM_QOS_FLAGS_ALL) {
                        error = -EBUSY;
                        break;
                }
                /* Fall through */
        case PCI_D0:
        case PCI_D1:
        case PCI_D2:
        case PCI_D3hot:
                error = acpi_device_set_power(adev, state_conv[state]);
        }

        if (!error)
                pci_dbg(dev, "power state changed by ACPI to %s\n",
                        acpi_power_state_string(state_conv[state]));

        return error;
}

static pci_power_t acpi_pci_get_power_state(struct pci_dev *dev)
{
        struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
        static const pci_power_t state_conv[] = {
                [ACPI_STATE_D0] = PCI_D0,
                [ACPI_STATE_D1] = PCI_D1,
                [ACPI_STATE_D2] = PCI_D2,
                [ACPI_STATE_D3_HOT] = PCI_D3hot,
                [ACPI_STATE_D3_COLD] = PCI_D3cold,
        };
        int state;

        if (!adev || !acpi_device_power_manageable(adev))
                return PCI_UNKNOWN;

        if (acpi_device_get_power(adev, &state) || state == ACPI_STATE_UNKNOWN)
                return PCI_UNKNOWN;

        return state_conv[state];
}

static int acpi_pci_propagate_wakeup(struct pci_bus *bus, bool enable)
{
        while (bus->parent) {
                if (acpi_pm_device_can_wakeup(&bus->self->dev))
                        return acpi_pm_set_bridge_wakeup(&bus->self->dev, enable);

                bus = bus->parent;
        }

        /* We have reached the root bus. */
        if (bus->bridge) {
                if (acpi_pm_device_can_wakeup(bus->bridge))
                        return acpi_pm_set_bridge_wakeup(bus->bridge, enable);
        }
        return 0;
}

static int acpi_pci_wakeup(struct pci_dev *dev, bool enable)
{
        if (acpi_pm_device_can_wakeup(&dev->dev))
                return acpi_pm_set_device_wakeup(&dev->dev, enable);

        return acpi_pci_propagate_wakeup(dev->bus, enable);
}

static bool acpi_pci_need_resume(struct pci_dev *dev)
{
        struct acpi_device *adev = ACPI_COMPANION(&dev->dev);

        /*
         * In some cases (e.g. Samsung 305V4A) leaving a bridge in suspend
         * over system-wide suspend/resume confuses the platform firmware,
         * so avoid doing that.  According to Section 16.1.6 of ACPI 6.2,
         * endpoint devices are expected to be in D3 before invoking the S3
         * entry path from the firmware, so they should not be affected by
         * this issue.
         */
        if (pci_is_bridge(dev) && acpi_target_system_state() != ACPI_STATE_S0)
                return true;

        if (!adev || !acpi_device_power_manageable(adev))
                return false;

        if (adev->wakeup.flags.valid &&
            device_may_wakeup(&dev->dev) != !!adev->wakeup.prepare_count)
                return true;

        if (acpi_target_system_state() == ACPI_STATE_S0)
                return false;

        return !!adev->power.flags.dsw_present;
}

static const struct pci_platform_pm_ops acpi_pci_platform_pm = {
        .bridge_d3 = acpi_pci_bridge_d3,
        .is_manageable = acpi_pci_power_manageable,
        .set_state = acpi_pci_set_power_state,
        .get_state = acpi_pci_get_power_state,
        .choose_state = acpi_pci_choose_state,
        .set_wakeup = acpi_pci_wakeup,
        .need_resume = acpi_pci_need_resume,
};
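
/*
 * The acpi_pci_platform_pm callbacks above are handed to the PCI core
 * via pci_set_platform_pm() in acpi_pci_init() below; from then on ACPI
 * acts as the platform power-management backend for PCI devices.
 */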

void acpi_pci_add_bus(struct pci_bus *bus)
{
        union acpi_object *obj;
        struct pci_host_bridge *bridge;

        if (acpi_pci_disabled || !bus->bridge || !ACPI_HANDLE(bus->bridge))
                return;

        acpi_pci_slot_enumerate(bus);
        acpiphp_enumerate_slots(bus);

        /*
         * For a host bridge, check its _DSM for function 8 and if
         * that is available, mark it in pci_host_bridge.
         */
        if (!pci_is_root_bus(bus))
                return;

        obj = acpi_evaluate_dsm(ACPI_HANDLE(bus->bridge), &pci_acpi_dsm_guid, 3,
                                RESET_DELAY_DSM, NULL);
        if (!obj)
                return;

        if (obj->type == ACPI_TYPE_INTEGER && obj->integer.value == 1) {
                bridge = pci_find_host_bridge(bus);
                bridge->ignore_reset_delay = 1;
        }
        ACPI_FREE(obj);
}

void acpi_pci_remove_bus(struct pci_bus *bus)
{
        if (acpi_pci_disabled || !bus->bridge)
                return;

        acpiphp_remove_slots(bus);
        acpi_pci_slot_remove(bus);
}

/* ACPI bus type */
static struct acpi_device *acpi_pci_find_companion(struct device *dev)
{
        struct pci_dev *pci_dev = to_pci_dev(dev);
        bool check_children;
        u64 addr;

        check_children = pci_is_bridge(pci_dev);
        /* See the ACPI spec for the syntax of _ADR. */
        addr = (PCI_SLOT(pci_dev->devfn) << 16) | PCI_FUNC(pci_dev->devfn);
        return acpi_find_child_device(ACPI_COMPANION(dev->parent), addr,
                                      check_children);
}
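
/*
 * Example of the _ADR encoding used by acpi_pci_find_companion() above:
 * a device in slot 0x1c, function 3 yields addr 0x001c0003, i.e. the
 * device (slot) number in the high 16 bits and the function number in
 * the low 16 bits.
 */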

/**
 * pci_acpi_optimize_delay - optimize PCI D3 and D3cold delay from ACPI
 * @pdev: the PCI device whose delay is to be updated
 * @handle: ACPI handle of this device
 *
 * Update the d3_delay and d3cold_delay of a PCI device from the ACPI _DSM
 * control method of either the device itself or the PCI host bridge.
 *
 * Function 8, "Reset Delay," applies to the entire hierarchy below a PCI
 * host bridge.  If it returns one, the OS may assume that all devices in
 * the hierarchy have already completed power-on reset delays.
 *
 * Function 9, "Device Readiness Durations," applies only to the object
 * where it is located.  It returns delay durations required after various
 * events if the device requires less time than the spec requires.  Delays
 * from this function take precedence over the Reset Delay function.
 *
 * These _DSM functions are defined by the draft ECN of January 28, 2014,
 * titled "ACPI additions for FW latency optimizations."
 */
static void pci_acpi_optimize_delay(struct pci_dev *pdev,
                                    acpi_handle handle)
{
        struct pci_host_bridge *bridge = pci_find_host_bridge(pdev->bus);
        int value;
        union acpi_object *obj, *elements;

        if (bridge->ignore_reset_delay)
                pdev->d3cold_delay = 0;

        obj = acpi_evaluate_dsm(handle, &pci_acpi_dsm_guid, 3,
                                FUNCTION_DELAY_DSM, NULL);
        if (!obj)
                return;

        if (obj->type == ACPI_TYPE_PACKAGE && obj->package.count == 5) {
                elements = obj->package.elements;
                if (elements[0].type == ACPI_TYPE_INTEGER) {
                        value = (int)elements[0].integer.value / 1000;
                        if (value < PCI_PM_D3COLD_WAIT)
                                pdev->d3cold_delay = value;
                }
                if (elements[3].type == ACPI_TYPE_INTEGER) {
                        value = (int)elements[3].integer.value / 1000;
                        if (value < PCI_PM_D3_WAIT)
                                pdev->d3_delay = value;
                }
        }
        ACPI_FREE(obj);
}

static void pci_acpi_set_untrusted(struct pci_dev *dev)
{
        u8 val;

        if (pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
                return;
        if (device_property_read_u8(&dev->dev, "ExternalFacingPort", &val))
                return;

        /*
         * These root ports expose PCIe (including DMA) outside of the
         * system so make sure we treat them and everything behind as
         * untrusted.
         */
        if (val)
                dev->untrusted = 1;
}

static void pci_acpi_setup(struct device *dev)
{
        struct pci_dev *pci_dev = to_pci_dev(dev);
        struct acpi_device *adev = ACPI_COMPANION(dev);

        if (!adev)
                return;

        pci_acpi_optimize_delay(pci_dev, adev->handle);
        pci_acpi_set_untrusted(pci_dev);

        pci_acpi_add_pm_notifier(adev, pci_dev);
        if (!adev->wakeup.flags.valid)
                return;

        device_set_wakeup_capable(dev, true);
        /*
         * For bridges that can do D3 we enable wake automatically (as
         * we do for the power management itself in that case).  The
         * reason is that the bridge may have additional methods such as
         * _DSW that need to be called.
         */
        if (pci_dev->bridge_d3)
                device_wakeup_enable(dev);

        acpi_pci_wakeup(pci_dev, false);
}

static void pci_acpi_cleanup(struct device *dev)
{
        struct acpi_device *adev = ACPI_COMPANION(dev);
        struct pci_dev *pci_dev = to_pci_dev(dev);

        if (!adev)
                return;

        pci_acpi_remove_pm_notifier(adev);
        if (adev->wakeup.flags.valid) {
                if (pci_dev->bridge_d3)
                        device_wakeup_disable(dev);

                device_set_wakeup_capable(dev, false);
        }
}

static bool pci_acpi_bus_match(struct device *dev)
{
        return dev_is_pci(dev);
}

static struct acpi_bus_type acpi_pci_bus = {
        .name = "PCI",
        .match = pci_acpi_bus_match,
        .find_companion = acpi_pci_find_companion,
        .setup = pci_acpi_setup,
        .cleanup = pci_acpi_cleanup,
};

static struct fwnode_handle *(*pci_msi_get_fwnode_cb)(struct device *dev);

/**
 * pci_msi_register_fwnode_provider - Register callback to retrieve fwnode
 * @fn: Callback matching a device to a fwnode that identifies a PCI
 *      MSI domain.
 *
 * This should be called by the irqchip driver that is the parent of the
 * MSI domain, to provide a callback for querying the fwnode.
 */
void
pci_msi_register_fwnode_provider(struct fwnode_handle *(*fn)(struct device *))
{
        pci_msi_get_fwnode_cb = fn;
}
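
/*
 * A hypothetical irqchip driver would register its lookup callback
 * before PCI host bridges are enumerated, along these lines (the names
 * are illustrative only):
 *
 *      static struct fwnode_handle *my_msi_fwnode(struct device *dev)
 *      {
 *              return my_msi_domain_fwnode;
 *      }
 *      ...
 *      pci_msi_register_fwnode_provider(&my_msi_fwnode);
 */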

/**
 * pci_host_bridge_acpi_msi_domain - Retrieve MSI domain of a PCI host bridge
 * @bus: The PCI host bridge bus.
 *
 * This function uses the callback function registered by
 * pci_msi_register_fwnode_provider() to retrieve the irq_domain with
 * type DOMAIN_BUS_PCI_MSI of the specified host bridge bus.
 * This returns NULL on error or when the domain is not found.
 */
struct irq_domain *pci_host_bridge_acpi_msi_domain(struct pci_bus *bus)
{
        struct fwnode_handle *fwnode;

        if (!pci_msi_get_fwnode_cb)
                return NULL;

        fwnode = pci_msi_get_fwnode_cb(&bus->dev);
        if (!fwnode)
                return NULL;

        return irq_find_matching_fwnode(fwnode, DOMAIN_BUS_PCI_MSI);
}

static int __init acpi_pci_init(void)
{
        int ret;

        if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_MSI) {
                pr_info("ACPI FADT declares the system doesn't support MSI, so disabling it\n");
                pci_no_msi();
        }

        if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_ASPM) {
                pr_info("ACPI FADT declares the system doesn't support PCIe ASPM, so disabling it\n");
                pcie_no_aspm();
        }

        /*
         * If the ACPI glue cannot be registered, PCI power management
         * simply stays with the PCI core's native implementation.
         */
        ret = register_acpi_bus_type(&acpi_pci_bus);
        if (ret)
                return 0;

        pci_set_platform_pm(&acpi_pci_platform_pm);
        acpi_pci_slot_init();
        acpiphp_init();

        return 0;
}
arch_initcall(acpi_pci_init);