// SPDX-License-Identifier: GPL-2.0-only
/*
 * acpi_processor.c - ACPI processor enumeration support
 *
 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 * Copyright (C) 2004 Dominik Brodowski <linux@brodo.de>
 * Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 * Copyright (C) 2013, Intel Corporation
 *                     Rafael J. Wysocki <rafael.j.wysocki@intel.com>
 */
#define pr_fmt(fmt) "ACPI: " fmt

#include <linux/acpi.h>
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/dmi.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/platform_device.h>

#include <acpi/processor.h>

#include <asm/cpu.h>

#include <xen/xen.h>

#include "internal.h"

DEFINE_PER_CPU(struct acpi_processor *, processors);
EXPORT_PER_CPU_SYMBOL(processors);

/* Errata Handling */
struct acpi_processor_errata errata __read_mostly;
EXPORT_SYMBOL_GPL(errata);

acpi_handle acpi_get_processor_handle(int cpu)
{
	struct acpi_processor *pr;

	pr = per_cpu(processors, cpu);
	if (pr)
		return pr->handle;

	return NULL;
}
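
/*
 * Illustrative use of the returned handle (a sketch, not a call site in
 * this file): a caller holding a valid CPU number can evaluate methods on
 * the corresponding ACPI object, e.g.:
 *
 *	acpi_handle handle = acpi_get_processor_handle(cpu);
 *	unsigned long long sun;
 *
 *	if (handle && ACPI_SUCCESS(acpi_evaluate_integer(handle, "_SUN",
 *							 NULL, &sun)))
 *		pr_debug("CPU%d slot number: %llu\n", cpu, sun);
 */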

static int acpi_processor_errata_piix4(struct pci_dev *dev)
{
	u8 value1 = 0;
	u8 value2 = 0;
	struct pci_dev *ide_dev = NULL, *isa_dev = NULL;

	if (!dev)
		return -EINVAL;

	/*
	 * Note that 'dev' references the PIIX4 ACPI Controller.
	 */

	switch (dev->revision) {
	case 0:
		dev_dbg(&dev->dev, "Found PIIX4 A-step\n");
		break;
	case 1:
		dev_dbg(&dev->dev, "Found PIIX4 B-step\n");
		break;
	case 2:
		dev_dbg(&dev->dev, "Found PIIX4E\n");
		break;
	case 3:
		dev_dbg(&dev->dev, "Found PIIX4M\n");
		break;
	default:
		dev_dbg(&dev->dev, "Found unknown PIIX4\n");
		break;
	}

	switch (dev->revision) {
	case 0:		/* PIIX4 A-step */
	case 1:		/* PIIX4 B-step */
		/*
		 * See specification changes #13 ("Manual Throttle Duty Cycle")
		 * and #14 ("Enabling and Disabling Manual Throttle"), plus
		 * erratum #5 ("STPCLK# Deassertion Time") from the January
		 * 2002 PIIX4 specification update.  Applies to only older
		 * PIIX4 models.
		 */
		errata.piix4.throttle = 1;
		fallthrough;

	case 2:		/* PIIX4E */
	case 3:		/* PIIX4M */
		/*
		 * See erratum #18 ("C3 Power State/BMIDE and Type-F DMA
		 * Livelock") from the January 2002 PIIX4 specification update.
		 * Applies to all PIIX4 models.
		 */

		/*
		 * BM-IDE
		 * ------
		 * Find the PIIX4 IDE Controller and get the Bus Master IDE
		 * Status register address.  We'll use this later to read
		 * each IDE controller's DMA status to make sure we catch all
		 * DMA activity.
		 */
		ide_dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
					 PCI_DEVICE_ID_INTEL_82371AB,
					 PCI_ANY_ID, PCI_ANY_ID, NULL);
		if (ide_dev) {
			/* BAR 4 of the IDE function is the Bus Master IDE I/O base. */
			errata.piix4.bmisx = pci_resource_start(ide_dev, 4);
			pci_dev_put(ide_dev);
		}

		/*
		 * Type-F DMA
		 * ----------
		 * Find the PIIX4 ISA Controller and read the Motherboard
		 * DMA controller's status to see if Type-F (Fast) DMA mode
		 * is enabled (bit 7) on either channel.  Note that we'll
		 * disable C3 support if this is enabled, as some legacy
		 * devices won't operate well if fast DMA is disabled.
		 */
		isa_dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
					 PCI_DEVICE_ID_INTEL_82371AB_0,
					 PCI_ANY_ID, PCI_ANY_ID, NULL);
		if (isa_dev) {
			pci_read_config_byte(isa_dev, 0x76, &value1);
			pci_read_config_byte(isa_dev, 0x77, &value2);
			if ((value1 & 0x80) || (value2 & 0x80))
				errata.piix4.fdma = 1;
			pci_dev_put(isa_dev);
		}

		break;
	}

	/*
	 * Test the errata flags rather than ide_dev/isa_dev: the references
	 * were dropped above, and finding the ISA bridge alone does not mean
	 * Type-F DMA is actually enabled.
	 */
	if (errata.piix4.bmisx)
		dev_dbg(&dev->dev, "Bus master activity detection (BM-IDE) erratum enabled\n");

	if (errata.piix4.fdma)
		dev_dbg(&dev->dev, "Type-F DMA livelock erratum (C3 disabled)\n");

	return 0;
}

static int acpi_processor_errata(void)
{
	int result = 0;
	struct pci_dev *dev = NULL;

	/*
	 * PIIX4
	 */
	dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
			     PCI_DEVICE_ID_INTEL_82371AB_3, PCI_ANY_ID,
			     PCI_ANY_ID, NULL);
	if (dev) {
		result = acpi_processor_errata_piix4(dev);
		pci_dev_put(dev);
	}

	return result;
}
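
/*
 * How these flags are consumed (a summary; the call sites live in the
 * processor idle/throttling code): errata.piix4.bmisx makes the idle path
 * poll the Bus Master IDE status ports for DMA activity before entering C3,
 * errata.piix4.fdma disables C3 when Type-F DMA is enabled, and
 * errata.piix4.throttle flags the manual-throttling workarounds on the
 * older steppings.
 */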

/* Create a platform device to represent a CPU frequency control mechanism. */
static void cpufreq_add_device(const char *name)
{
	struct platform_device *pdev;

	pdev = platform_device_register_simple(name, PLATFORM_DEVID_NONE, NULL, 0);
	if (IS_ERR(pdev))
		pr_info("%s device creation failed: %pe\n", name, pdev);
}

#ifdef CONFIG_X86
/* Check presence of Processor Clocking Control by searching for \_SB.PCCH. */
static void __init acpi_pcc_cpufreq_init(void)
{
	acpi_status status;
	acpi_handle handle;

	status = acpi_get_handle(NULL, "\\_SB", &handle);
	if (ACPI_FAILURE(status))
		return;

	if (acpi_has_method(handle, "PCCH"))
		cpufreq_add_device("pcc-cpufreq");
}
#else
static void __init acpi_pcc_cpufreq_init(void) {}
#endif /* CONFIG_X86 */

/* Initialization */
static DEFINE_PER_CPU(void *, processor_device_array);

static int acpi_processor_set_per_cpu(struct acpi_processor *pr,
				      struct acpi_device *device)
{
	BUG_ON(pr->id >= nr_cpu_ids);

	/*
	 * Buggy BIOS check.
	 * The ACPI id of a processor can be reported wrongly by the BIOS.
	 * Don't trust it blindly.
	 */
	if (per_cpu(processor_device_array, pr->id) != NULL &&
	    per_cpu(processor_device_array, pr->id) != device) {
		dev_warn(&device->dev,
			 "BIOS reported wrong ACPI id %d for the processor\n",
			 pr->id);
		return -EINVAL;
	}
	/*
	 * processor_device_array is not cleared on errors to allow buggy BIOS
	 * checks.
	 */
	per_cpu(processor_device_array, pr->id) = device;
	per_cpu(processors, pr->id) = pr;

	return 0;
}

#ifdef CONFIG_ACPI_HOTPLUG_CPU
static int acpi_processor_hotadd_init(struct acpi_processor *pr,
				      struct acpi_device *device)
{
	int ret;

	if (invalid_phys_cpuid(pr->phys_id))
		return -ENODEV;

	cpu_maps_update_begin();
	cpus_write_lock();

	ret = acpi_map_cpu(pr->handle, pr->phys_id, pr->acpi_id, &pr->id);
	if (ret)
		goto out;

	ret = acpi_processor_set_per_cpu(pr, device);
	if (ret) {
		acpi_unmap_cpu(pr->id);
		goto out;
	}

	ret = arch_register_cpu(pr->id);
	if (ret) {
		/* Leave the processor device array in place to detect buggy bios */
		per_cpu(processors, pr->id) = NULL;
		acpi_unmap_cpu(pr->id);
		goto out;
	}

	/*
	 * CPU got hot-added, but cpu_data is not initialized yet.  Do
	 * cpu_idle/throttling initialization when the CPU gets online for
	 * the first time.
	 */
	pr_info("CPU%d has been hot-added\n", pr->id);

out:
	cpus_write_unlock();
	cpu_maps_update_done();
	return ret;
}
#else
static inline int acpi_processor_hotadd_init(struct acpi_processor *pr,
					     struct acpi_device *device)
{
	return -ENODEV;
}
#endif /* CONFIG_ACPI_HOTPLUG_CPU */
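
/*
 * A note on the lock ordering in acpi_processor_hotadd_init() above
 * (following the usual CPU-hotplug conventions): cpu_maps_update_begin()
 * serializes updates to the CPU present/possible masks, while
 * cpus_write_lock() additionally excludes concurrent CPU online/offline
 * operations while the new CPU is mapped and registered.
 */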

static int acpi_processor_get_info(struct acpi_device *device)
{
	union acpi_object object = { .processor = { 0 } };
	struct acpi_buffer buffer = { sizeof(union acpi_object), &object };
	struct acpi_processor *pr = acpi_driver_data(device);
	int device_declaration = 0;
	acpi_status status = AE_OK;
	static int cpu0_initialized;
	unsigned long long value;
	int ret;

	acpi_processor_errata();

	/*
	 * Check to see if we have bus mastering arbitration control.  This
	 * is required for proper C3 usage (to maintain cache coherency).
	 */
	if (acpi_gbl_FADT.pm2_control_block && acpi_gbl_FADT.pm2_control_length) {
		pr->flags.bm_control = 1;
		dev_dbg(&device->dev, "Bus mastering arbitration control present\n");
	} else {
		dev_dbg(&device->dev, "No bus mastering arbitration control\n");
	}

	if (!strcmp(acpi_device_hid(device), ACPI_PROCESSOR_OBJECT_HID)) {
		/* Declared with "Processor" statement; match ProcessorID */
		status = acpi_evaluate_object(pr->handle, NULL, NULL, &buffer);
		if (ACPI_FAILURE(status)) {
			dev_err(&device->dev,
				"Failed to evaluate processor object (0x%x)\n",
				status);
			return -ENODEV;
		}

		pr->acpi_id = object.processor.proc_id;
	} else {
		/*
		 * Declared with "Device" statement; match _UID.
		 */
		status = acpi_evaluate_integer(pr->handle, METHOD_NAME__UID,
					       NULL, &value);
		if (ACPI_FAILURE(status)) {
			dev_err(&device->dev,
				"Failed to evaluate processor _UID (0x%x)\n",
				status);
			return -ENODEV;
		}
		device_declaration = 1;
		pr->acpi_id = value;
	}

	if (acpi_duplicate_processor_id(pr->acpi_id)) {
		if (pr->acpi_id == 0xff)
			dev_info_once(&device->dev,
				"Entry not well-defined, consider updating BIOS\n");
		else
			dev_err(&device->dev,
				"Failed to get unique processor _UID (0x%x)\n",
				pr->acpi_id);
		return -ENODEV;
	}

	pr->phys_id = acpi_get_phys_id(pr->handle, device_declaration,
				       pr->acpi_id);
	if (invalid_phys_cpuid(pr->phys_id))
		dev_dbg(&device->dev, "Failed to get CPU physical ID.\n");

	pr->id = acpi_map_cpuid(pr->phys_id, pr->acpi_id);
	if (!cpu0_initialized) {
		cpu0_initialized = 1;
		/*
		 * Handle UP system running SMP kernel, with no CPU
		 * entry in MADT.
		 */
		if (!acpi_has_cpu_in_madt() && invalid_logical_cpuid(pr->id) &&
		    (num_online_cpus() == 1))
			pr->id = 0;
		/*
		 * Check availability of Processor Performance Control by
		 * looking at the presence of the _PCT object under the first
		 * processor definition.
		 */
		if (acpi_has_method(pr->handle, "_PCT"))
			cpufreq_add_device("acpi-cpufreq");
	}

	/*
	 * This code is not called unless we know the CPU is present and
	 * enabled.  The two paths are:
	 * a) Initially present CPUs on architectures that do not defer
	 *    their arch_register_cpu() calls until this point.
	 * b) Hotplugged CPUs (the enabled bit in _STA has transitioned
	 *    from not enabled to enabled).
	 */
	if (!get_cpu_device(pr->id))
		ret = acpi_processor_hotadd_init(pr, device);
	else
		ret = acpi_processor_set_per_cpu(pr, device);
	if (ret)
		return ret;

	/*
	 * On some boxes several processors use the same processor bus id,
	 * but they are located in different scopes, for example:
	 * \_SB.SCK0.CPU0
	 * \_SB.SCK1.CPU0
	 * Rename the processor device bus id, generating the new bus id as
	 * "CPU" followed by the CPU ID.
	 */
	sprintf(acpi_device_bid(device), "CPU%X", pr->id);
	dev_dbg(&device->dev, "Processor [%d:%d]\n", pr->id, pr->acpi_id);

	if (!object.processor.pblk_address)
		dev_dbg(&device->dev, "No PBLK (NULL address)\n");
	else if (object.processor.pblk_length != 6)
		dev_err(&device->dev, "Invalid PBLK length [%d]\n",
			object.processor.pblk_length);
	else {
		pr->throttling.address = object.processor.pblk_address;
		pr->throttling.duty_offset = acpi_gbl_FADT.duty_offset;
		pr->throttling.duty_width = acpi_gbl_FADT.duty_width;

		pr->pblk = object.processor.pblk_address;
	}
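
	/*
	 * For reference, per the ACPI specification: the 6-byte P_BLK starts
	 * with the P_CNT throttling control register (4 bytes), followed by
	 * the P_LVL2 and P_LVL3 C-state entry registers (1 byte each).
	 */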

	/*
	 * If ACPI describes a slot number for this CPU, we can use it to
	 * ensure we get the right value in the "physical id" field
	 * of /proc/cpuinfo.
	 */
	status = acpi_evaluate_integer(pr->handle, "_SUN", NULL, &value);
	if (ACPI_SUCCESS(status))
		arch_fix_phys_package_id(pr->id, value);

	return 0;
}

/*
 * Do not put anything in here which needs the core to be online.
 * For example MSR access or setting up things which check for cpuinfo_x86
 * (cpu_data(cpu)) values, like CPU feature flags, family, model, etc.
 * Such things have to be put in and set up by the processor driver's .probe().
 */
static int acpi_processor_add(struct acpi_device *device,
			      const struct acpi_device_id *id)
{
	struct acpi_processor *pr;
	struct device *dev;
	int result = 0;

	if (!acpi_device_is_enabled(device))
		return -ENODEV;

	pr = kzalloc(sizeof(struct acpi_processor), GFP_KERNEL);
	if (!pr)
		return -ENOMEM;

	if (!zalloc_cpumask_var(&pr->throttling.shared_cpu_map, GFP_KERNEL)) {
		result = -ENOMEM;
		goto err_free_pr;
	}

	pr->handle = device->handle;
	strscpy(acpi_device_name(device), ACPI_PROCESSOR_DEVICE_NAME);
	strscpy(acpi_device_class(device), ACPI_PROCESSOR_CLASS);
	device->driver_data = pr;

	result = acpi_processor_get_info(device);
	if (result) /* Processor is not physically present or unavailable */
		goto err_clear_driver_data;

	dev = get_cpu_device(pr->id);
	if (!dev) {
		result = -ENODEV;
		goto err_clear_per_cpu;
	}

	result = acpi_bind_one(dev, device);
	if (result)
		goto err_clear_per_cpu;

	pr->dev = dev;

	/* Trigger the processor driver's .probe() if present. */
	if (device_attach(dev) >= 0)
		return 1;

	dev_err(dev, "Processor driver could not be attached\n");
	acpi_unbind_one(dev);

err_clear_per_cpu:
	per_cpu(processors, pr->id) = NULL;
err_clear_driver_data:
	device->driver_data = NULL;
	free_cpumask_var(pr->throttling.shared_cpu_map);
err_free_pr:
	kfree(pr);
	return result;
}
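
/*
 * The return value of 1 from acpi_processor_add() follows the ACPI scan
 * handler convention (as used by the scan core): a positive value means the
 * handler has claimed the device, 0 declines it, and a negative value
 * reports an error.
 */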

#ifdef CONFIG_ACPI_HOTPLUG_CPU
/* Removal */
static void acpi_processor_post_eject(struct acpi_device *device)
{
	struct acpi_processor *pr;

	if (!device || !acpi_driver_data(device))
		return;

	pr = acpi_driver_data(device);
	if (pr->id >= nr_cpu_ids)
		goto out;

	/*
	 * The only reason why we ever get here is CPU hot-removal.  The CPU is
	 * already offline and the ACPI device removal locking prevents it from
	 * being put back online at this point.
	 *
	 * Unbind the driver from the processor device and detach it from the
	 * ACPI companion object.
	 */
	device_release_driver(pr->dev);
	acpi_unbind_one(pr->dev);

	cpu_maps_update_begin();
	cpus_write_lock();

	/* Remove the CPU. */
	arch_unregister_cpu(pr->id);
	acpi_unmap_cpu(pr->id);

	/* Clean up. */
	per_cpu(processor_device_array, pr->id) = NULL;
	per_cpu(processors, pr->id) = NULL;

	cpus_write_unlock();
	cpu_maps_update_done();

	try_offline_node(cpu_to_node(pr->id));

out:
	free_cpumask_var(pr->throttling.shared_cpu_map);
	kfree(pr);
}
#endif /* CONFIG_ACPI_HOTPLUG_CPU */

#ifdef CONFIG_ARCH_MIGHT_HAVE_ACPI_PDC
bool __init processor_physically_present(acpi_handle handle)
{
	int cpuid, type;
	u32 acpi_id;
	acpi_status status;
	acpi_object_type acpi_type;
	unsigned long long tmp;
	union acpi_object object = {};
	struct acpi_buffer buffer = { sizeof(union acpi_object), &object };

	status = acpi_get_type(handle, &acpi_type);
	if (ACPI_FAILURE(status))
		return false;

	switch (acpi_type) {
	case ACPI_TYPE_PROCESSOR:
		status = acpi_evaluate_object(handle, NULL, NULL, &buffer);
		if (ACPI_FAILURE(status))
			return false;
		acpi_id = object.processor.proc_id;
		break;
	case ACPI_TYPE_DEVICE:
		status = acpi_evaluate_integer(handle, METHOD_NAME__UID,
					       NULL, &tmp);
		if (ACPI_FAILURE(status))
			return false;
		acpi_id = tmp;
		break;
	default:
		return false;
	}

	if (xen_initial_domain())
		/*
		 * When running as a Xen dom0 the number of processors Linux
		 * sees can be different from the real number of processors on
		 * the system, and we still need to execute _PDC or _OSC for
		 * all of them.
		 */
		return xen_processor_present(acpi_id);

	type = (acpi_type == ACPI_TYPE_DEVICE) ? 1 : 0;
	cpuid = acpi_get_cpuid(handle, type, acpi_id);

	return !invalid_logical_cpuid(cpuid);
}

/* vendor specific UUID indicating an Intel platform */
static u8 sb_uuid_str[] = "4077A616-290C-47BE-9EBD-D87058713953";

static acpi_status __init acpi_processor_osc(acpi_handle handle, u32 lvl,
					     void *context, void **rv)
{
	u32 capbuf[2] = {};
	struct acpi_osc_context osc_context = {
		.uuid_str = sb_uuid_str,
		.rev = 1,
		.cap.length = 8,
		.cap.pointer = capbuf,
	};
	acpi_status status;

	if (!processor_physically_present(handle))
		return AE_OK;

	arch_acpi_set_proc_cap_bits(&capbuf[OSC_SUPPORT_DWORD]);

	status = acpi_run_osc(handle, &osc_context);
	if (ACPI_FAILURE(status))
		return status;

	kfree(osc_context.ret.pointer);

	return AE_OK;
}
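
/*
 * Layout of the _OSC capabilities buffer used above, per the standard _OSC
 * calling convention: capbuf[OSC_QUERY_DWORD] holds the query/status flags
 * (left clear here) and capbuf[OSC_SUPPORT_DWORD] the capability bits
 * filled in by arch_acpi_set_proc_cap_bits(), hence a cap.length of 8
 * bytes for the two DWORDs.
 */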

static bool __init acpi_early_processor_osc(void)
{
	acpi_status status;

	acpi_proc_quirk_mwait_check();

	status = acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
				     ACPI_UINT32_MAX, acpi_processor_osc, NULL,
				     NULL, NULL);
	if (ACPI_FAILURE(status))
		return false;

	status = acpi_get_devices(ACPI_PROCESSOR_DEVICE_HID, acpi_processor_osc,
				  NULL, NULL);
	if (ACPI_FAILURE(status))
		return false;

	return true;
}

void __init acpi_early_processor_control_setup(void)
{
	if (acpi_early_processor_osc()) {
		pr_debug("_OSC evaluated successfully for all CPUs\n");
	} else {
		pr_debug("_OSC evaluation for CPUs failed, trying _PDC\n");
		acpi_early_processor_set_pdc();
	}
}
#endif

/*
 * The following ACPI IDs are known to be suitable for representing as
 * processor devices.
 */
static const struct acpi_device_id processor_device_ids[] = {
	{ ACPI_PROCESSOR_OBJECT_HID, },
	{ ACPI_PROCESSOR_DEVICE_HID, },
	{ }
};

static struct acpi_scan_handler processor_handler = {
	.ids = processor_device_ids,
	.attach = acpi_processor_add,
#ifdef CONFIG_ACPI_HOTPLUG_CPU
	.post_eject = acpi_processor_post_eject,
#endif
	.hotplug = {
		.enabled = true,
	},
};

static int acpi_processor_container_attach(struct acpi_device *dev,
					   const struct acpi_device_id *id)
{
	return 1;
}

static const struct acpi_device_id processor_container_ids[] = {
	{ ACPI_PROCESSOR_CONTAINER_HID, },
	{ }
};

static struct acpi_scan_handler processor_container_handler = {
	.ids = processor_container_ids,
	.attach = acpi_processor_container_attach,
};

/* The number of unique processor IDs */
static int nr_unique_ids __initdata;

/* The number of duplicate processor IDs */
static int nr_duplicate_ids;

/* Used to store the unique processor IDs */
static int unique_processor_ids[] __initdata = {
	[0 ... NR_CPUS - 1] = -1,
};

/* Used to store the duplicate processor IDs */
static int duplicate_processor_ids[] = {
	[0 ... NR_CPUS - 1] = -1,
};

static void __init processor_validated_ids_update(int proc_id)
{
	int i;

	if (nr_unique_ids == NR_CPUS || nr_duplicate_ids == NR_CPUS)
		return;

	/*
	 * Firstly, compare the proc_id with the duplicate IDs: if proc_id is
	 * already among them, do nothing.
	 */
	for (i = 0; i < nr_duplicate_ids; i++) {
		if (duplicate_processor_ids[i] == proc_id)
			return;
	}

	/*
	 * Secondly, compare the proc_id with the unique IDs: if proc_id is
	 * among them, put it in the duplicate IDs.
	 */
	for (i = 0; i < nr_unique_ids; i++) {
		if (unique_processor_ids[i] == proc_id) {
			duplicate_processor_ids[nr_duplicate_ids] = proc_id;
			nr_duplicate_ids++;
			return;
		}
	}

	/*
	 * Lastly, the proc_id is a unique ID, put it in the unique IDs.
	 */
	unique_processor_ids[nr_unique_ids] = proc_id;
	nr_unique_ids++;
}

static acpi_status __init acpi_processor_ids_walk(acpi_handle handle,
						  u32 lvl,
						  void *context,
						  void **rv)
{
	acpi_status status;
	acpi_object_type acpi_type;
	unsigned long long uid;
	union acpi_object object = { 0 };
	struct acpi_buffer buffer = { sizeof(union acpi_object), &object };

	status = acpi_get_type(handle, &acpi_type);
	if (ACPI_FAILURE(status))
		return status;

	switch (acpi_type) {
	case ACPI_TYPE_PROCESSOR:
		status = acpi_evaluate_object(handle, NULL, NULL, &buffer);
		if (ACPI_FAILURE(status))
			goto err;
		uid = object.processor.proc_id;
		break;

	case ACPI_TYPE_DEVICE:
		status = acpi_evaluate_integer(handle, "_UID", NULL, &uid);
		if (ACPI_FAILURE(status))
			goto err;
		break;
	default:
		goto err;
	}

	processor_validated_ids_update(uid);
	return AE_OK;

err:
	/* Exit on error, but don't abort the namespace walk */
	acpi_handle_info(handle, "Invalid processor object\n");
	return AE_OK;
}

static void __init acpi_processor_check_duplicates(void)
{
	/* check the correctness for all processors in ACPI namespace */
	acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
			    ACPI_UINT32_MAX,
			    acpi_processor_ids_walk,
			    NULL, NULL, NULL);
	acpi_get_devices(ACPI_PROCESSOR_DEVICE_HID, acpi_processor_ids_walk,
			 NULL, NULL);
}

bool acpi_duplicate_processor_id(int proc_id)
{
	int i;

	/*
	 * Compare the proc_id with the duplicate IDs: return true if proc_id
	 * is among them, and false otherwise.
	 */
	for (i = 0; i < nr_duplicate_ids; i++) {
		if (duplicate_processor_ids[i] == proc_id)
			return true;
	}
	return false;
}

void __init acpi_processor_init(void)
{
	acpi_processor_check_duplicates();
	acpi_scan_add_handler_with_hotplug(&processor_handler, "processor");
	acpi_scan_add_handler(&processor_container_handler);
	acpi_pcc_cpufreq_init();
}

#ifdef CONFIG_ACPI_PROCESSOR_CSTATE
/**
 * acpi_processor_claim_cst_control - Request _CST control from the platform.
 */
bool acpi_processor_claim_cst_control(void)
{
	static bool cst_control_claimed;
	acpi_status status;

	if (!acpi_gbl_FADT.cst_control || cst_control_claimed)
		return true;

	status = acpi_os_write_port(acpi_gbl_FADT.smi_command,
				    acpi_gbl_FADT.cst_control, 8);
	if (ACPI_FAILURE(status)) {
		/* pr_fmt() already supplies the "ACPI: " prefix. */
		pr_warn("Failed to claim processor _CST control\n");
		return false;
	}

	cst_control_claimed = true;
	return true;
}
EXPORT_SYMBOL_NS_GPL(acpi_processor_claim_cst_control, "ACPI_PROCESSOR_IDLE");
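
/*
 * Writing the FADT-provided CST_CNT value to the SMI command port is the
 * ACPI-defined handshake for requesting _CST control from the platform
 * firmware; the static flag above ensures the SMI is triggered only once.
 */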

/**
 * acpi_processor_evaluate_cst - Evaluate the processor _CST control method.
 * @handle: ACPI handle of the processor object containing the _CST.
 * @cpu: The numeric ID of the target CPU.
 * @info: Object to write the C-state information into.
 *
 * Extract the C-state information for the given CPU from the output of the
 * _CST control method under the corresponding ACPI processor object (or
 * processor device object) and populate @info with it.
 *
 * If any ACPI_ADR_SPACE_FIXED_HARDWARE C-states are found, invoke
 * acpi_processor_ffh_cstate_probe() to verify them and update the
 * cpu_cstate_entry data for @cpu.
 */
int acpi_processor_evaluate_cst(acpi_handle handle, u32 cpu,
				struct acpi_processor_power *info)
{
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *cst;
	acpi_status status;
	u64 count;
	int last_index = 0;
	int i, ret = 0;

	status = acpi_evaluate_object(handle, "_CST", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		acpi_handle_debug(handle, "No _CST\n");
		return -ENODEV;
	}

	cst = buffer.pointer;

	/* There must be at least 2 elements. */
	if (!cst || cst->type != ACPI_TYPE_PACKAGE || cst->package.count < 2) {
		acpi_handle_warn(handle, "Invalid _CST output\n");
		ret = -EFAULT;
		goto end;
	}

	count = cst->package.elements[0].integer.value;

	/* Validate the number of C-states. */
	if (count < 1 || count != cst->package.count - 1) {
		acpi_handle_warn(handle, "Inconsistent _CST data\n");
		ret = -EFAULT;
		goto end;
	}

	for (i = 1; i <= count; i++) {
		union acpi_object *element;
		union acpi_object *obj;
		struct acpi_power_register *reg;
		struct acpi_processor_cx cx;

		/*
		 * If there is not enough space for all C-states, skip the
		 * excess ones and log a warning.
		 */
		if (last_index >= ACPI_PROCESSOR_MAX_POWER - 1) {
			acpi_handle_warn(handle,
					 "No room for more idle states (limit: %d)\n",
					 ACPI_PROCESSOR_MAX_POWER - 1);
			break;
		}

		memset(&cx, 0, sizeof(cx));

		element = &cst->package.elements[i];
		if (element->type != ACPI_TYPE_PACKAGE) {
			acpi_handle_info(handle, "_CST C%d type(%x) is not package, skip...\n",
					 i, element->type);
			continue;
		}

		if (element->package.count != 4) {
			acpi_handle_info(handle, "_CST C%d package count(%d) is not 4, skip...\n",
					 i, element->package.count);
			continue;
		}

		obj = &element->package.elements[0];

		if (obj->type != ACPI_TYPE_BUFFER) {
			acpi_handle_info(handle, "_CST C%d package element[0] type(%x) is not buffer, skip...\n",
					 i, obj->type);
			continue;
		}

		reg = (struct acpi_power_register *)obj->buffer.pointer;

		obj = &element->package.elements[1];
		if (obj->type != ACPI_TYPE_INTEGER) {
			acpi_handle_info(handle, "_CST C[%d] package element[1] type(%x) is not integer, skip...\n",
					 i, obj->type);
			continue;
		}

		cx.type = obj->integer.value;
		/*
		 * There are known cases in which the _CST output does not
		 * contain C1, so if the type of the first state found is not
		 * C1, leave an empty slot for C1 to be filled in later.
		 */
		if (i == 1 && cx.type != ACPI_STATE_C1)
			last_index = 1;

		cx.address = reg->address;
		cx.index = last_index + 1;

		if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) {
			if (!acpi_processor_ffh_cstate_probe(cpu, &cx, reg)) {
				/*
				 * In the majority of cases _CST describes C1 as
				 * a FIXED_HARDWARE C-state, but if the command
				 * line forbids using MWAIT, use CSTATE_HALT for
				 * C1 regardless.
				 */
				if (cx.type == ACPI_STATE_C1 &&
				    boot_option_idle_override == IDLE_NOMWAIT) {
					cx.entry_method = ACPI_CSTATE_HALT;
					snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
				} else {
					cx.entry_method = ACPI_CSTATE_FFH;
				}
			} else if (cx.type == ACPI_STATE_C1) {
				/*
				 * In the special case of C1, FIXED_HARDWARE can
				 * be handled by executing the HLT instruction.
				 */
				cx.entry_method = ACPI_CSTATE_HALT;
				snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
			} else {
				acpi_handle_info(handle, "_CST C%d declares FIXED_HARDWARE C-state but not supported in hardware, skip...\n",
						 i);
				continue;
			}
		} else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
			cx.entry_method = ACPI_CSTATE_SYSTEMIO;
			snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI IOPORT 0x%x",
				 cx.address);
		} else {
			acpi_handle_info(handle, "_CST C%d space_id(%x) neither FIXED_HARDWARE nor SYSTEM_IO, skip...\n",
					 i, reg->space_id);
			continue;
		}
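
		/*
		 * C1 can always be entered (via HLT at worst), so mark it
		 * valid right away; the deeper states are vetted separately
		 * by the C-state setup code before being used.
		 */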
		if (cx.type == ACPI_STATE_C1)
			cx.valid = 1;

		obj = &element->package.elements[2];
		if (obj->type != ACPI_TYPE_INTEGER) {
			acpi_handle_info(handle, "_CST C%d package element[2] type(%x) not integer, skip...\n",
					 i, obj->type);
			continue;
		}

		cx.latency = obj->integer.value;

		obj = &element->package.elements[3];
		if (obj->type != ACPI_TYPE_INTEGER) {
			acpi_handle_info(handle, "_CST C%d package element[3] type(%x) not integer, skip...\n",
					 i, obj->type);
			continue;
		}

		memcpy(&info->states[++last_index], &cx, sizeof(cx));
	}

	acpi_handle_debug(handle, "Found %d idle states\n", last_index);

	info->count = last_index;

end:
	kfree(buffer.pointer);

	return ret;
}
EXPORT_SYMBOL_NS_GPL(acpi_processor_evaluate_cst, "ACPI_PROCESSOR_IDLE");
#endif /* CONFIG_ACPI_PROCESSOR_CSTATE */