1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * acpi_processor.c - ACPI processor enumeration support 4 * 5 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com> 6 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> 7 * Copyright (C) 2004 Dominik Brodowski <linux@brodo.de> 8 * Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> 9 * Copyright (C) 2013, Intel Corporation 10 * Rafael J. Wysocki <rafael.j.wysocki@intel.com> 11 */ 12 #define pr_fmt(fmt) "ACPI: " fmt 13 14 #include <linux/acpi.h> 15 #include <linux/cpu.h> 16 #include <linux/device.h> 17 #include <linux/dmi.h> 18 #include <linux/kernel.h> 19 #include <linux/module.h> 20 #include <linux/pci.h> 21 #include <linux/platform_device.h> 22 23 #include <acpi/processor.h> 24 25 #include <asm/cpu.h> 26 27 #include <xen/xen.h> 28 29 #include "internal.h" 30 31 DEFINE_PER_CPU(struct acpi_processor *, processors); 32 EXPORT_PER_CPU_SYMBOL(processors); 33 34 /* Errata Handling */ 35 struct acpi_processor_errata errata __read_mostly; 36 EXPORT_SYMBOL_GPL(errata); 37 38 acpi_handle acpi_get_processor_handle(int cpu) 39 { 40 struct acpi_processor *pr; 41 42 pr = per_cpu(processors, cpu); 43 if (pr) 44 return pr->handle; 45 46 return NULL; 47 } 48 49 static int acpi_processor_errata_piix4(struct pci_dev *dev) 50 { 51 u8 value1 = 0; 52 u8 value2 = 0; 53 54 55 if (!dev) 56 return -EINVAL; 57 58 /* 59 * Note that 'dev' references the PIIX4 ACPI Controller. 60 */ 61 62 switch (dev->revision) { 63 case 0: 64 dev_dbg(&dev->dev, "Found PIIX4 A-step\n"); 65 break; 66 case 1: 67 dev_dbg(&dev->dev, "Found PIIX4 B-step\n"); 68 break; 69 case 2: 70 dev_dbg(&dev->dev, "Found PIIX4E\n"); 71 break; 72 case 3: 73 dev_dbg(&dev->dev, "Found PIIX4M\n"); 74 break; 75 default: 76 dev_dbg(&dev->dev, "Found unknown PIIX4\n"); 77 break; 78 } 79 80 switch (dev->revision) { 81 82 case 0: /* PIIX4 A-step */ 83 case 1: /* PIIX4 B-step */ 84 /* 85 * See specification changes #13 ("Manual Throttle Duty Cycle") 86 * and #14 ("Enabling and Disabling Manual Throttle"), plus 87 * erratum #5 ("STPCLK# Deassertion Time") from the January 88 * 2002 PIIX4 specification update. Applies to only older 89 * PIIX4 models. 90 */ 91 errata.piix4.throttle = 1; 92 fallthrough; 93 94 case 2: /* PIIX4E */ 95 case 3: /* PIIX4M */ 96 /* 97 * See erratum #18 ("C3 Power State/BMIDE and Type-F DMA 98 * Livelock") from the January 2002 PIIX4 specification update. 99 * Applies to all PIIX4 models. 100 */ 101 102 /* 103 * BM-IDE 104 * ------ 105 * Find the PIIX4 IDE Controller and get the Bus Master IDE 106 * Status register address. We'll use this later to read 107 * each IDE controller's DMA status to make sure we catch all 108 * DMA activity. 109 */ 110 dev = pci_get_subsys(PCI_VENDOR_ID_INTEL, 111 PCI_DEVICE_ID_INTEL_82371AB, 112 PCI_ANY_ID, PCI_ANY_ID, NULL); 113 if (dev) { 114 errata.piix4.bmisx = pci_resource_start(dev, 4); 115 pci_dev_put(dev); 116 } 117 118 /* 119 * Type-F DMA 120 * ---------- 121 * Find the PIIX4 ISA Controller and read the Motherboard 122 * DMA controller's status to see if Type-F (Fast) DMA mode 123 * is enabled (bit 7) on either channel. Note that we'll 124 * disable C3 support if this is enabled, as some legacy 125 * devices won't operate well if fast DMA is disabled. 126 */ 127 dev = pci_get_subsys(PCI_VENDOR_ID_INTEL, 128 PCI_DEVICE_ID_INTEL_82371AB_0, 129 PCI_ANY_ID, PCI_ANY_ID, NULL); 130 if (dev) { 131 pci_read_config_byte(dev, 0x76, &value1); 132 pci_read_config_byte(dev, 0x77, &value2); 133 if ((value1 & 0x80) || (value2 & 0x80)) 134 errata.piix4.fdma = 1; 135 pci_dev_put(dev); 136 } 137 138 break; 139 } 140 141 if (errata.piix4.bmisx) 142 dev_dbg(&dev->dev, "Bus master activity detection (BM-IDE) erratum enabled\n"); 143 if (errata.piix4.fdma) 144 dev_dbg(&dev->dev, "Type-F DMA livelock erratum (C3 disabled)\n"); 145 146 return 0; 147 } 148 149 static int acpi_processor_errata(void) 150 { 151 int result = 0; 152 struct pci_dev *dev = NULL; 153 154 /* 155 * PIIX4 156 */ 157 dev = pci_get_subsys(PCI_VENDOR_ID_INTEL, 158 PCI_DEVICE_ID_INTEL_82371AB_3, PCI_ANY_ID, 159 PCI_ANY_ID, NULL); 160 if (dev) { 161 result = acpi_processor_errata_piix4(dev); 162 pci_dev_put(dev); 163 } 164 165 return result; 166 } 167 168 /* Create a platform device to represent a CPU frequency control mechanism. */ 169 static void cpufreq_add_device(const char *name) 170 { 171 struct platform_device *pdev; 172 173 pdev = platform_device_register_simple(name, PLATFORM_DEVID_NONE, NULL, 0); 174 if (IS_ERR(pdev)) 175 pr_info("%s device creation failed: %pe\n", name, pdev); 176 } 177 178 #ifdef CONFIG_X86 179 /* Check presence of Processor Clocking Control by searching for \_SB.PCCH. */ 180 static void __init acpi_pcc_cpufreq_init(void) 181 { 182 acpi_status status; 183 acpi_handle handle; 184 185 status = acpi_get_handle(NULL, "\\_SB", &handle); 186 if (ACPI_FAILURE(status)) 187 return; 188 189 if (acpi_has_method(handle, "PCCH")) 190 cpufreq_add_device("pcc-cpufreq"); 191 } 192 #else 193 static void __init acpi_pcc_cpufreq_init(void) {} 194 #endif /* CONFIG_X86 */ 195 196 /* Initialization */ 197 static DEFINE_PER_CPU(void *, processor_device_array); 198 199 static int acpi_processor_set_per_cpu(struct acpi_processor *pr, 200 struct acpi_device *device) 201 { 202 BUG_ON(pr->id >= nr_cpu_ids); 203 204 /* 205 * Buggy BIOS check. 206 * ACPI id of processors can be reported wrongly by the BIOS. 207 * Don't trust it blindly 208 */ 209 if (per_cpu(processor_device_array, pr->id) != NULL && 210 per_cpu(processor_device_array, pr->id) != device) { 211 dev_warn(&device->dev, 212 "BIOS reported wrong ACPI id %d for the processor\n", 213 pr->id); 214 return -EINVAL; 215 } 216 /* 217 * processor_device_array is not cleared on errors to allow buggy BIOS 218 * checks. 219 */ 220 per_cpu(processor_device_array, pr->id) = device; 221 per_cpu(processors, pr->id) = pr; 222 223 return 0; 224 } 225 226 #ifdef CONFIG_ACPI_HOTPLUG_CPU 227 static int acpi_processor_hotadd_init(struct acpi_processor *pr, 228 struct acpi_device *device) 229 { 230 int ret; 231 232 if (invalid_phys_cpuid(pr->phys_id)) 233 return -ENODEV; 234 235 cpu_maps_update_begin(); 236 cpus_write_lock(); 237 238 ret = acpi_map_cpu(pr->handle, pr->phys_id, pr->acpi_id, &pr->id); 239 if (ret) 240 goto out; 241 242 ret = acpi_processor_set_per_cpu(pr, device); 243 if (ret) { 244 acpi_unmap_cpu(pr->id); 245 goto out; 246 } 247 248 ret = arch_register_cpu(pr->id); 249 if (ret) { 250 /* Leave the processor device array in place to detect buggy bios */ 251 per_cpu(processors, pr->id) = NULL; 252 acpi_unmap_cpu(pr->id); 253 goto out; 254 } 255 256 /* 257 * CPU got hot-added, but cpu_data is not initialized yet. Do 258 * cpu_idle/throttling initialization when the CPU gets online for 259 * the first time. 260 */ 261 pr_info("CPU%d has been hot-added\n", pr->id); 262 263 out: 264 cpus_write_unlock(); 265 cpu_maps_update_done(); 266 return ret; 267 } 268 #else 269 static inline int acpi_processor_hotadd_init(struct acpi_processor *pr, 270 struct acpi_device *device) 271 { 272 return -ENODEV; 273 } 274 #endif /* CONFIG_ACPI_HOTPLUG_CPU */ 275 276 static int acpi_processor_get_info(struct acpi_device *device) 277 { 278 union acpi_object object = { 0 }; 279 struct acpi_buffer buffer = { sizeof(union acpi_object), &object }; 280 struct acpi_processor *pr = acpi_driver_data(device); 281 int device_declaration = 0; 282 acpi_status status = AE_OK; 283 static int cpu0_initialized; 284 unsigned long long value; 285 int ret; 286 287 acpi_processor_errata(); 288 289 /* 290 * Check to see if we have bus mastering arbitration control. This 291 * is required for proper C3 usage (to maintain cache coherency). 292 */ 293 if (acpi_gbl_FADT.pm2_control_block && acpi_gbl_FADT.pm2_control_length) { 294 pr->flags.bm_control = 1; 295 dev_dbg(&device->dev, "Bus mastering arbitration control present\n"); 296 } else 297 dev_dbg(&device->dev, "No bus mastering arbitration control\n"); 298 299 if (!strcmp(acpi_device_hid(device), ACPI_PROCESSOR_OBJECT_HID)) { 300 /* Declared with "Processor" statement; match ProcessorID */ 301 status = acpi_evaluate_object(pr->handle, NULL, NULL, &buffer); 302 if (ACPI_FAILURE(status)) { 303 dev_err(&device->dev, 304 "Failed to evaluate processor object (0x%x)\n", 305 status); 306 return -ENODEV; 307 } 308 309 pr->acpi_id = object.processor.proc_id; 310 } else { 311 /* 312 * Declared with "Device" statement; match _UID. 313 */ 314 status = acpi_evaluate_integer(pr->handle, METHOD_NAME__UID, 315 NULL, &value); 316 if (ACPI_FAILURE(status)) { 317 dev_err(&device->dev, 318 "Failed to evaluate processor _UID (0x%x)\n", 319 status); 320 return -ENODEV; 321 } 322 device_declaration = 1; 323 pr->acpi_id = value; 324 } 325 326 if (acpi_duplicate_processor_id(pr->acpi_id)) { 327 if (pr->acpi_id == 0xff) 328 dev_info_once(&device->dev, 329 "Entry not well-defined, consider updating BIOS\n"); 330 else 331 dev_err(&device->dev, 332 "Failed to get unique processor _UID (0x%x)\n", 333 pr->acpi_id); 334 return -ENODEV; 335 } 336 337 pr->phys_id = acpi_get_phys_id(pr->handle, device_declaration, 338 pr->acpi_id); 339 if (invalid_phys_cpuid(pr->phys_id)) 340 dev_dbg(&device->dev, "Failed to get CPU physical ID.\n"); 341 342 pr->id = acpi_map_cpuid(pr->phys_id, pr->acpi_id); 343 if (!cpu0_initialized) { 344 cpu0_initialized = 1; 345 /* 346 * Handle UP system running SMP kernel, with no CPU 347 * entry in MADT 348 */ 349 if (!acpi_has_cpu_in_madt() && invalid_logical_cpuid(pr->id) && 350 (num_online_cpus() == 1)) 351 pr->id = 0; 352 /* 353 * Check availability of Processor Performance Control by 354 * looking at the presence of the _PCT object under the first 355 * processor definition. 356 */ 357 if (acpi_has_method(pr->handle, "_PCT")) 358 cpufreq_add_device("acpi-cpufreq"); 359 } 360 361 /* 362 * This code is not called unless we know the CPU is present and 363 * enabled. The two paths are: 364 * a) Initially present CPUs on architectures that do not defer 365 * their arch_register_cpu() calls until this point. 366 * b) Hotplugged CPUs (enabled bit in _STA has transitioned from not 367 * enabled to enabled) 368 */ 369 if (!get_cpu_device(pr->id)) 370 ret = acpi_processor_hotadd_init(pr, device); 371 else 372 ret = acpi_processor_set_per_cpu(pr, device); 373 if (ret) 374 return ret; 375 376 /* 377 * On some boxes several processors use the same processor bus id. 378 * But they are located in different scope. For example: 379 * \_SB.SCK0.CPU0 380 * \_SB.SCK1.CPU0 381 * Rename the processor device bus id. And the new bus id will be 382 * generated as the following format: 383 * CPU+CPU ID. 384 */ 385 sprintf(acpi_device_bid(device), "CPU%X", pr->id); 386 dev_dbg(&device->dev, "Processor [%d:%d]\n", pr->id, pr->acpi_id); 387 388 if (!object.processor.pblk_address) 389 dev_dbg(&device->dev, "No PBLK (NULL address)\n"); 390 else if (object.processor.pblk_length != 6) 391 dev_err(&device->dev, "Invalid PBLK length [%d]\n", 392 object.processor.pblk_length); 393 else { 394 pr->throttling.address = object.processor.pblk_address; 395 pr->throttling.duty_offset = acpi_gbl_FADT.duty_offset; 396 pr->throttling.duty_width = acpi_gbl_FADT.duty_width; 397 398 pr->pblk = object.processor.pblk_address; 399 } 400 401 /* 402 * If ACPI describes a slot number for this CPU, we can use it to 403 * ensure we get the right value in the "physical id" field 404 * of /proc/cpuinfo 405 */ 406 status = acpi_evaluate_integer(pr->handle, "_SUN", NULL, &value); 407 if (ACPI_SUCCESS(status)) 408 arch_fix_phys_package_id(pr->id, value); 409 410 return 0; 411 } 412 413 /* 414 * Do not put anything in here which needs the core to be online. 415 * For example MSR access or setting up things which check for cpuinfo_x86 416 * (cpu_data(cpu)) values, like CPU feature flags, family, model, etc. 417 * Such things have to be put in and set up by the processor driver's .probe(). 418 */ 419 static int acpi_processor_add(struct acpi_device *device, 420 const struct acpi_device_id *id) 421 { 422 struct acpi_processor *pr; 423 struct device *dev; 424 int result = 0; 425 426 if (!acpi_device_is_enabled(device)) 427 return -ENODEV; 428 429 pr = kzalloc(sizeof(struct acpi_processor), GFP_KERNEL); 430 if (!pr) 431 return -ENOMEM; 432 433 if (!zalloc_cpumask_var(&pr->throttling.shared_cpu_map, GFP_KERNEL)) { 434 result = -ENOMEM; 435 goto err_free_pr; 436 } 437 438 pr->handle = device->handle; 439 strscpy(acpi_device_name(device), ACPI_PROCESSOR_DEVICE_NAME); 440 strscpy(acpi_device_class(device), ACPI_PROCESSOR_CLASS); 441 device->driver_data = pr; 442 443 result = acpi_processor_get_info(device); 444 if (result) /* Processor is not physically present or unavailable */ 445 goto err_clear_driver_data; 446 447 dev = get_cpu_device(pr->id); 448 if (!dev) { 449 result = -ENODEV; 450 goto err_clear_per_cpu; 451 } 452 453 result = acpi_bind_one(dev, device); 454 if (result) 455 goto err_clear_per_cpu; 456 457 pr->dev = dev; 458 459 /* Trigger the processor driver's .probe() if present. */ 460 if (device_attach(dev) >= 0) 461 return 1; 462 463 dev_err(dev, "Processor driver could not be attached\n"); 464 acpi_unbind_one(dev); 465 466 err_clear_per_cpu: 467 per_cpu(processors, pr->id) = NULL; 468 err_clear_driver_data: 469 device->driver_data = NULL; 470 free_cpumask_var(pr->throttling.shared_cpu_map); 471 err_free_pr: 472 kfree(pr); 473 return result; 474 } 475 476 #ifdef CONFIG_ACPI_HOTPLUG_CPU 477 /* Removal */ 478 static void acpi_processor_post_eject(struct acpi_device *device) 479 { 480 struct acpi_processor *pr; 481 482 if (!device || !acpi_driver_data(device)) 483 return; 484 485 pr = acpi_driver_data(device); 486 if (pr->id >= nr_cpu_ids) 487 goto out; 488 489 /* 490 * The only reason why we ever get here is CPU hot-removal. The CPU is 491 * already offline and the ACPI device removal locking prevents it from 492 * being put back online at this point. 493 * 494 * Unbind the driver from the processor device and detach it from the 495 * ACPI companion object. 496 */ 497 device_release_driver(pr->dev); 498 acpi_unbind_one(pr->dev); 499 500 cpu_maps_update_begin(); 501 cpus_write_lock(); 502 503 /* Remove the CPU. */ 504 arch_unregister_cpu(pr->id); 505 acpi_unmap_cpu(pr->id); 506 507 /* Clean up. */ 508 per_cpu(processor_device_array, pr->id) = NULL; 509 per_cpu(processors, pr->id) = NULL; 510 511 cpus_write_unlock(); 512 cpu_maps_update_done(); 513 514 try_offline_node(cpu_to_node(pr->id)); 515 516 out: 517 free_cpumask_var(pr->throttling.shared_cpu_map); 518 kfree(pr); 519 } 520 #endif /* CONFIG_ACPI_HOTPLUG_CPU */ 521 522 #ifdef CONFIG_ARCH_MIGHT_HAVE_ACPI_PDC 523 bool __init processor_physically_present(acpi_handle handle) 524 { 525 int cpuid, type; 526 u32 acpi_id; 527 acpi_status status; 528 acpi_object_type acpi_type; 529 unsigned long long tmp; 530 union acpi_object object = {}; 531 struct acpi_buffer buffer = { sizeof(union acpi_object), &object }; 532 533 status = acpi_get_type(handle, &acpi_type); 534 if (ACPI_FAILURE(status)) 535 return false; 536 537 switch (acpi_type) { 538 case ACPI_TYPE_PROCESSOR: 539 status = acpi_evaluate_object(handle, NULL, NULL, &buffer); 540 if (ACPI_FAILURE(status)) 541 return false; 542 acpi_id = object.processor.proc_id; 543 break; 544 case ACPI_TYPE_DEVICE: 545 status = acpi_evaluate_integer(handle, METHOD_NAME__UID, 546 NULL, &tmp); 547 if (ACPI_FAILURE(status)) 548 return false; 549 acpi_id = tmp; 550 break; 551 default: 552 return false; 553 } 554 555 if (xen_initial_domain()) 556 /* 557 * When running as a Xen dom0 the number of processors Linux 558 * sees can be different from the real number of processors on 559 * the system, and we still need to execute _PDC or _OSC for 560 * all of them. 561 */ 562 return xen_processor_present(acpi_id); 563 564 type = (acpi_type == ACPI_TYPE_DEVICE) ? 1 : 0; 565 cpuid = acpi_get_cpuid(handle, type, acpi_id); 566 567 return !invalid_logical_cpuid(cpuid); 568 } 569 570 /* vendor specific UUID indicating an Intel platform */ 571 static u8 sb_uuid_str[] = "4077A616-290C-47BE-9EBD-D87058713953"; 572 573 static acpi_status __init acpi_processor_osc(acpi_handle handle, u32 lvl, 574 void *context, void **rv) 575 { 576 u32 capbuf[2] = {}; 577 struct acpi_osc_context osc_context = { 578 .uuid_str = sb_uuid_str, 579 .rev = 1, 580 .cap.length = 8, 581 .cap.pointer = capbuf, 582 }; 583 acpi_status status; 584 585 if (!processor_physically_present(handle)) 586 return AE_OK; 587 588 arch_acpi_set_proc_cap_bits(&capbuf[OSC_SUPPORT_DWORD]); 589 590 status = acpi_run_osc(handle, &osc_context); 591 if (ACPI_FAILURE(status)) 592 return status; 593 594 kfree(osc_context.ret.pointer); 595 596 return AE_OK; 597 } 598 599 static bool __init acpi_early_processor_osc(void) 600 { 601 acpi_status status; 602 603 acpi_proc_quirk_mwait_check(); 604 605 status = acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT, 606 ACPI_UINT32_MAX, acpi_processor_osc, NULL, 607 NULL, NULL); 608 if (ACPI_FAILURE(status)) 609 return false; 610 611 status = acpi_get_devices(ACPI_PROCESSOR_DEVICE_HID, acpi_processor_osc, 612 NULL, NULL); 613 if (ACPI_FAILURE(status)) 614 return false; 615 616 return true; 617 } 618 619 void __init acpi_early_processor_control_setup(void) 620 { 621 if (acpi_early_processor_osc()) { 622 pr_debug("_OSC evaluated successfully for all CPUs\n"); 623 } else { 624 pr_debug("_OSC evaluation for CPUs failed, trying _PDC\n"); 625 acpi_early_processor_set_pdc(); 626 } 627 } 628 #endif 629 630 /* 631 * The following ACPI IDs are known to be suitable for representing as 632 * processor devices. 633 */ 634 static const struct acpi_device_id processor_device_ids[] = { 635 636 { ACPI_PROCESSOR_OBJECT_HID, }, 637 { ACPI_PROCESSOR_DEVICE_HID, }, 638 639 { } 640 }; 641 642 static struct acpi_scan_handler processor_handler = { 643 .ids = processor_device_ids, 644 .attach = acpi_processor_add, 645 #ifdef CONFIG_ACPI_HOTPLUG_CPU 646 .post_eject = acpi_processor_post_eject, 647 #endif 648 .hotplug = { 649 .enabled = true, 650 }, 651 }; 652 653 static int acpi_processor_container_attach(struct acpi_device *dev, 654 const struct acpi_device_id *id) 655 { 656 return 1; 657 } 658 659 static const struct acpi_device_id processor_container_ids[] = { 660 { ACPI_PROCESSOR_CONTAINER_HID, }, 661 { } 662 }; 663 664 static struct acpi_scan_handler processor_container_handler = { 665 .ids = processor_container_ids, 666 .attach = acpi_processor_container_attach, 667 }; 668 669 /* The number of the unique processor IDs */ 670 static int nr_unique_ids __initdata; 671 672 /* The number of the duplicate processor IDs */ 673 static int nr_duplicate_ids; 674 675 /* Used to store the unique processor IDs */ 676 static int unique_processor_ids[] __initdata = { 677 [0 ... NR_CPUS - 1] = -1, 678 }; 679 680 /* Used to store the duplicate processor IDs */ 681 static int duplicate_processor_ids[] = { 682 [0 ... NR_CPUS - 1] = -1, 683 }; 684 685 static void __init processor_validated_ids_update(int proc_id) 686 { 687 int i; 688 689 if (nr_unique_ids == NR_CPUS||nr_duplicate_ids == NR_CPUS) 690 return; 691 692 /* 693 * Firstly, compare the proc_id with duplicate IDs, if the proc_id is 694 * already in the IDs, do nothing. 695 */ 696 for (i = 0; i < nr_duplicate_ids; i++) { 697 if (duplicate_processor_ids[i] == proc_id) 698 return; 699 } 700 701 /* 702 * Secondly, compare the proc_id with unique IDs, if the proc_id is in 703 * the IDs, put it in the duplicate IDs. 704 */ 705 for (i = 0; i < nr_unique_ids; i++) { 706 if (unique_processor_ids[i] == proc_id) { 707 duplicate_processor_ids[nr_duplicate_ids] = proc_id; 708 nr_duplicate_ids++; 709 return; 710 } 711 } 712 713 /* 714 * Lastly, the proc_id is a unique ID, put it in the unique IDs. 715 */ 716 unique_processor_ids[nr_unique_ids] = proc_id; 717 nr_unique_ids++; 718 } 719 720 static acpi_status __init acpi_processor_ids_walk(acpi_handle handle, 721 u32 lvl, 722 void *context, 723 void **rv) 724 { 725 acpi_status status; 726 acpi_object_type acpi_type; 727 unsigned long long uid; 728 union acpi_object object = { 0 }; 729 struct acpi_buffer buffer = { sizeof(union acpi_object), &object }; 730 731 status = acpi_get_type(handle, &acpi_type); 732 if (ACPI_FAILURE(status)) 733 return status; 734 735 switch (acpi_type) { 736 case ACPI_TYPE_PROCESSOR: 737 status = acpi_evaluate_object(handle, NULL, NULL, &buffer); 738 if (ACPI_FAILURE(status)) 739 goto err; 740 uid = object.processor.proc_id; 741 break; 742 743 case ACPI_TYPE_DEVICE: 744 status = acpi_evaluate_integer(handle, "_UID", NULL, &uid); 745 if (ACPI_FAILURE(status)) 746 goto err; 747 break; 748 default: 749 goto err; 750 } 751 752 processor_validated_ids_update(uid); 753 return AE_OK; 754 755 err: 756 /* Exit on error, but don't abort the namespace walk */ 757 acpi_handle_info(handle, "Invalid processor object\n"); 758 return AE_OK; 759 760 } 761 762 static void __init acpi_processor_check_duplicates(void) 763 { 764 /* check the correctness for all processors in ACPI namespace */ 765 acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT, 766 ACPI_UINT32_MAX, 767 acpi_processor_ids_walk, 768 NULL, NULL, NULL); 769 acpi_get_devices(ACPI_PROCESSOR_DEVICE_HID, acpi_processor_ids_walk, 770 NULL, NULL); 771 } 772 773 bool acpi_duplicate_processor_id(int proc_id) 774 { 775 int i; 776 777 /* 778 * compare the proc_id with duplicate IDs, if the proc_id is already 779 * in the duplicate IDs, return true, otherwise, return false. 780 */ 781 for (i = 0; i < nr_duplicate_ids; i++) { 782 if (duplicate_processor_ids[i] == proc_id) 783 return true; 784 } 785 return false; 786 } 787 788 void __init acpi_processor_init(void) 789 { 790 acpi_processor_check_duplicates(); 791 acpi_scan_add_handler_with_hotplug(&processor_handler, "processor"); 792 acpi_scan_add_handler(&processor_container_handler); 793 acpi_pcc_cpufreq_init(); 794 } 795 796 #ifdef CONFIG_ACPI_PROCESSOR_CSTATE 797 /** 798 * acpi_processor_claim_cst_control - Request _CST control from the platform. 799 */ 800 bool acpi_processor_claim_cst_control(void) 801 { 802 static bool cst_control_claimed; 803 acpi_status status; 804 805 if (!acpi_gbl_FADT.cst_control || cst_control_claimed) 806 return true; 807 808 status = acpi_os_write_port(acpi_gbl_FADT.smi_command, 809 acpi_gbl_FADT.cst_control, 8); 810 if (ACPI_FAILURE(status)) { 811 pr_warn("ACPI: Failed to claim processor _CST control\n"); 812 return false; 813 } 814 815 cst_control_claimed = true; 816 return true; 817 } 818 EXPORT_SYMBOL_GPL(acpi_processor_claim_cst_control); 819 820 /** 821 * acpi_processor_evaluate_cst - Evaluate the processor _CST control method. 822 * @handle: ACPI handle of the processor object containing the _CST. 823 * @cpu: The numeric ID of the target CPU. 824 * @info: Object write the C-states information into. 825 * 826 * Extract the C-state information for the given CPU from the output of the _CST 827 * control method under the corresponding ACPI processor object (or processor 828 * device object) and populate @info with it. 829 * 830 * If any ACPI_ADR_SPACE_FIXED_HARDWARE C-states are found, invoke 831 * acpi_processor_ffh_cstate_probe() to verify them and update the 832 * cpu_cstate_entry data for @cpu. 833 */ 834 int acpi_processor_evaluate_cst(acpi_handle handle, u32 cpu, 835 struct acpi_processor_power *info) 836 { 837 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; 838 union acpi_object *cst; 839 acpi_status status; 840 u64 count; 841 int last_index = 0; 842 int i, ret = 0; 843 844 status = acpi_evaluate_object(handle, "_CST", NULL, &buffer); 845 if (ACPI_FAILURE(status)) { 846 acpi_handle_debug(handle, "No _CST\n"); 847 return -ENODEV; 848 } 849 850 cst = buffer.pointer; 851 852 /* There must be at least 2 elements. */ 853 if (!cst || cst->type != ACPI_TYPE_PACKAGE || cst->package.count < 2) { 854 acpi_handle_warn(handle, "Invalid _CST output\n"); 855 ret = -EFAULT; 856 goto end; 857 } 858 859 count = cst->package.elements[0].integer.value; 860 861 /* Validate the number of C-states. */ 862 if (count < 1 || count != cst->package.count - 1) { 863 acpi_handle_warn(handle, "Inconsistent _CST data\n"); 864 ret = -EFAULT; 865 goto end; 866 } 867 868 for (i = 1; i <= count; i++) { 869 union acpi_object *element; 870 union acpi_object *obj; 871 struct acpi_power_register *reg; 872 struct acpi_processor_cx cx; 873 874 /* 875 * If there is not enough space for all C-states, skip the 876 * excess ones and log a warning. 877 */ 878 if (last_index >= ACPI_PROCESSOR_MAX_POWER - 1) { 879 acpi_handle_warn(handle, 880 "No room for more idle states (limit: %d)\n", 881 ACPI_PROCESSOR_MAX_POWER - 1); 882 break; 883 } 884 885 memset(&cx, 0, sizeof(cx)); 886 887 element = &cst->package.elements[i]; 888 if (element->type != ACPI_TYPE_PACKAGE) { 889 acpi_handle_info(handle, "_CST C%d type(%x) is not package, skip...\n", 890 i, element->type); 891 continue; 892 } 893 894 if (element->package.count != 4) { 895 acpi_handle_info(handle, "_CST C%d package count(%d) is not 4, skip...\n", 896 i, element->package.count); 897 continue; 898 } 899 900 obj = &element->package.elements[0]; 901 902 if (obj->type != ACPI_TYPE_BUFFER) { 903 acpi_handle_info(handle, "_CST C%d package element[0] type(%x) is not buffer, skip...\n", 904 i, obj->type); 905 continue; 906 } 907 908 reg = (struct acpi_power_register *)obj->buffer.pointer; 909 910 obj = &element->package.elements[1]; 911 if (obj->type != ACPI_TYPE_INTEGER) { 912 acpi_handle_info(handle, "_CST C[%d] package element[1] type(%x) is not integer, skip...\n", 913 i, obj->type); 914 continue; 915 } 916 917 cx.type = obj->integer.value; 918 /* 919 * There are known cases in which the _CST output does not 920 * contain C1, so if the type of the first state found is not 921 * C1, leave an empty slot for C1 to be filled in later. 922 */ 923 if (i == 1 && cx.type != ACPI_STATE_C1) 924 last_index = 1; 925 926 cx.address = reg->address; 927 cx.index = last_index + 1; 928 929 if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) { 930 if (!acpi_processor_ffh_cstate_probe(cpu, &cx, reg)) { 931 /* 932 * In the majority of cases _CST describes C1 as 933 * a FIXED_HARDWARE C-state, but if the command 934 * line forbids using MWAIT, use CSTATE_HALT for 935 * C1 regardless. 936 */ 937 if (cx.type == ACPI_STATE_C1 && 938 boot_option_idle_override == IDLE_NOMWAIT) { 939 cx.entry_method = ACPI_CSTATE_HALT; 940 snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT"); 941 } else { 942 cx.entry_method = ACPI_CSTATE_FFH; 943 } 944 } else if (cx.type == ACPI_STATE_C1) { 945 /* 946 * In the special case of C1, FIXED_HARDWARE can 947 * be handled by executing the HLT instruction. 948 */ 949 cx.entry_method = ACPI_CSTATE_HALT; 950 snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT"); 951 } else { 952 acpi_handle_info(handle, "_CST C%d declares FIXED_HARDWARE C-state but not supported in hardware, skip...\n", 953 i); 954 continue; 955 } 956 } else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) { 957 cx.entry_method = ACPI_CSTATE_SYSTEMIO; 958 snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI IOPORT 0x%x", 959 cx.address); 960 } else { 961 acpi_handle_info(handle, "_CST C%d space_id(%x) neither FIXED_HARDWARE nor SYSTEM_IO, skip...\n", 962 i, reg->space_id); 963 continue; 964 } 965 966 if (cx.type == ACPI_STATE_C1) 967 cx.valid = 1; 968 969 obj = &element->package.elements[2]; 970 if (obj->type != ACPI_TYPE_INTEGER) { 971 acpi_handle_info(handle, "_CST C%d package element[2] type(%x) not integer, skip...\n", 972 i, obj->type); 973 continue; 974 } 975 976 cx.latency = obj->integer.value; 977 978 obj = &element->package.elements[3]; 979 if (obj->type != ACPI_TYPE_INTEGER) { 980 acpi_handle_info(handle, "_CST C%d package element[3] type(%x) not integer, skip...\n", 981 i, obj->type); 982 continue; 983 } 984 985 memcpy(&info->states[++last_index], &cx, sizeof(cx)); 986 } 987 988 acpi_handle_debug(handle, "Found %d idle states\n", last_index); 989 990 info->count = last_index; 991 992 end: 993 kfree(buffer.pointer); 994 995 return ret; 996 } 997 EXPORT_SYMBOL_GPL(acpi_processor_evaluate_cst); 998 #endif /* CONFIG_ACPI_PROCESSOR_CSTATE */ 999