/*
 * acpi_processor.c - ACPI Processor Driver ($Revision: 71 $)
 *
 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 * Copyright (C) 2004 Dominik Brodowski <linux@brodo.de>
 * Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *			- Added processor hotplug support
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 * TBD:
 *	1. Make # power states dynamic.
 *	2. Support duty_cycle values that span bit 4.
 *	3. Optimize by having scheduler determine business instead of
 *	   having us try to calculate it here.
 *	4. Need C1 timing -- must modify kernel (IRQ handler) to get this.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/cpufreq.h>
#include <linux/cpu.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/dmi.h>
#include <linux/moduleparam.h>
#include <linux/cpuidle.h>

#include <asm/io.h>
#include <asm/system.h>
#include <asm/cpu.h>
#include <asm/delay.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
#include <asm/smp.h>
#include <asm/acpi.h>

#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
#include <acpi/processor.h>

#define ACPI_PROCESSOR_COMPONENT	0x01000000
#define ACPI_PROCESSOR_CLASS		"processor"
#define ACPI_PROCESSOR_DEVICE_NAME	"Processor"
#define ACPI_PROCESSOR_FILE_INFO	"info"
#define ACPI_PROCESSOR_FILE_THROTTLING	"throttling"
#define ACPI_PROCESSOR_FILE_LIMIT	"limit"
#define ACPI_PROCESSOR_NOTIFY_PERFORMANCE	0x80
#define ACPI_PROCESSOR_NOTIFY_POWER	0x81
#define ACPI_PROCESSOR_NOTIFY_THROTTLING	0x82

#define ACPI_PROCESSOR_LIMIT_USER	0
#define ACPI_PROCESSOR_LIMIT_THERMAL	1

#define _COMPONENT	ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_core");

MODULE_AUTHOR("Paul Diefenbaugh");
MODULE_DESCRIPTION("ACPI Processor Driver");
MODULE_LICENSE("GPL");

static int acpi_processor_add(struct acpi_device *device);
static int acpi_processor_start(struct acpi_device *device);
static int acpi_processor_remove(struct acpi_device *device, int type);
static int acpi_processor_info_open_fs(struct inode *inode, struct file *file);
static void acpi_processor_notify(acpi_handle handle, u32 event, void *data);
static acpi_status acpi_processor_hotadd_init(acpi_handle handle, int *p_cpu);
static int acpi_processor_handle_eject(struct acpi_processor *pr);
extern int
acpi_processor_tstate_has_changed(struct acpi_processor *pr);


static const struct acpi_device_id processor_device_ids[] = {
	{ACPI_PROCESSOR_HID, 0},
	{"", 0},
};
MODULE_DEVICE_TABLE(acpi, processor_device_ids);

static struct acpi_driver acpi_processor_driver = {
	.name = "processor",
	.class = ACPI_PROCESSOR_CLASS,
	.ids = processor_device_ids,
	.ops = {
		.add = acpi_processor_add,
		.remove = acpi_processor_remove,
		.start = acpi_processor_start,
		.suspend = acpi_processor_suspend,
		.resume = acpi_processor_resume,
		},
};

#define INSTALL_NOTIFY_HANDLER		1
#define UNINSTALL_NOTIFY_HANDLER	2

static const struct file_operations acpi_processor_info_fops = {
	.open = acpi_processor_info_open_fs,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

struct acpi_processor *processors[NR_CPUS];
struct acpi_processor_errata errata __read_mostly;

/* --------------------------------------------------------------------------
                                Errata Handling
   -------------------------------------------------------------------------- */

static int acpi_processor_errata_piix4(struct pci_dev *dev)
{
	u8 value1 = 0;
	u8 value2 = 0;


	if (!dev)
		return -EINVAL;

	/*
	 * Note that 'dev' references the PIIX4 ACPI Controller.
	 */

	switch (dev->revision) {
	case 0:
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4 A-step\n"));
		break;
	case 1:
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4 B-step\n"));
		break;
	case 2:
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4E\n"));
		break;
	case 3:
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4M\n"));
		break;
	default:
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found unknown PIIX4\n"));
		break;
	}

	switch (dev->revision) {

	case 0:		/* PIIX4 A-step */
	case 1:		/* PIIX4 B-step */
		/*
		 * See specification changes #13 ("Manual Throttle Duty Cycle")
		 * and #14 ("Enabling and Disabling Manual Throttle"), plus
		 * erratum #5 ("STPCLK# Deassertion Time") from the January
		 * 2002 PIIX4 specification update.  Applies to only older
		 * PIIX4 models.
		 */
		errata.piix4.throttle = 1;
		/* fall through - the errata below apply to all PIIX4 models */

	case 2:		/* PIIX4E */
	case 3:		/* PIIX4M */
		/*
		 * See erratum #18 ("C3 Power State/BMIDE and Type-F DMA
		 * Livelock") from the January 2002 PIIX4 specification update.
		 * Applies to all PIIX4 models.
		 */

		/*
		 * BM-IDE
		 * ------
		 * Find the PIIX4 IDE Controller and get the Bus Master IDE
		 * Status register address.  We'll use this later to read
		 * each IDE controller's DMA status to make sure we catch all
		 * DMA activity.
		 */
		dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
				     PCI_DEVICE_ID_INTEL_82371AB,
				     PCI_ANY_ID, PCI_ANY_ID, NULL);
		if (dev) {
			errata.piix4.bmisx = pci_resource_start(dev, 4);
			pci_dev_put(dev);
		}

		/*
		 * Type-F DMA
		 * ----------
		 * Find the PIIX4 ISA Controller and read the Motherboard
		 * DMA controller's status to see if Type-F (Fast) DMA mode
		 * is enabled (bit 7) on either channel.  Note that we'll
		 * disable C3 support if this is enabled, as some legacy
		 * devices won't operate well if fast DMA is disabled.
		 */
		dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
				     PCI_DEVICE_ID_INTEL_82371AB_0,
				     PCI_ANY_ID, PCI_ANY_ID, NULL);
		if (dev) {
			pci_read_config_byte(dev, 0x76, &value1);
			pci_read_config_byte(dev, 0x77, &value2);
			if ((value1 & 0x80) || (value2 & 0x80))
				errata.piix4.fdma = 1;
			pci_dev_put(dev);
		}

		break;
	}

	if (errata.piix4.bmisx)
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "Bus master activity detection (BM-IDE) erratum enabled\n"));
	if (errata.piix4.fdma)
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "Type-F DMA livelock erratum (C3 disabled)\n"));

	return 0;
}

static int acpi_processor_errata(struct acpi_processor *pr)
{
	int result = 0;
	struct pci_dev *dev = NULL;


	if (!pr)
		return -EINVAL;

	/*
	 * PIIX4
	 */
	dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
			     PCI_DEVICE_ID_INTEL_82371AB_3, PCI_ANY_ID,
			     PCI_ANY_ID, NULL);
	if (dev) {
		result = acpi_processor_errata_piix4(dev);
		pci_dev_put(dev);
	}

	return result;
}

/* --------------------------------------------------------------------------
                       Common ACPI processor functions
   -------------------------------------------------------------------------- */

/*
 * _PDC is required for a BIOS-OS handshake for most of the newer
 * ACPI processor features.
 */
static int acpi_processor_set_pdc(struct acpi_processor *pr)
{
	struct acpi_object_list *pdc_in = pr->pdc;
	acpi_status status = AE_OK;


	if (!pdc_in)
		return status;

	status = acpi_evaluate_object(pr->handle, "_PDC", pdc_in, NULL);

	if (ACPI_FAILURE(status))
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
		    "Could not evaluate _PDC, using legacy perf. control...\n"));

	return status;
}

/* --------------------------------------------------------------------------
                              FS Interface (/proc)
   -------------------------------------------------------------------------- */

static struct proc_dir_entry *acpi_processor_dir = NULL;

static int acpi_processor_info_seq_show(struct seq_file *seq, void *offset)
{
	struct acpi_processor *pr = seq->private;


	if (!pr)
		goto end;

	seq_printf(seq, "processor id: %d\n"
		   "acpi id: %d\n"
		   "bus mastering control: %s\n"
		   "power management: %s\n"
		   "throttling control: %s\n"
		   "limit interface: %s\n",
		   pr->id,
		   pr->acpi_id,
		   pr->flags.bm_control ? "yes" : "no",
		   pr->flags.power ? "yes" : "no",
		   pr->flags.throttling ? "yes" : "no",
		   pr->flags.limit ? "yes" : "no");

      end:
	return 0;
}

static int acpi_processor_info_open_fs(struct inode *inode, struct file *file)
{
	return single_open(file, acpi_processor_info_seq_show,
			   PDE(inode)->data);
}

static int acpi_processor_add_fs(struct acpi_device *device)
{
	struct proc_dir_entry *entry = NULL;


	if (!acpi_device_dir(device)) {
		acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device),
						     acpi_processor_dir);
		if (!acpi_device_dir(device))
			return -ENODEV;
	}
	acpi_device_dir(device)->owner = THIS_MODULE;

	/* 'info' [R] */
	entry = create_proc_entry(ACPI_PROCESSOR_FILE_INFO,
				  S_IRUGO, acpi_device_dir(device));
	if (!entry)
		return -EIO;
	else {
		entry->proc_fops = &acpi_processor_info_fops;
		entry->data = acpi_driver_data(device);
		entry->owner = THIS_MODULE;
	}

	/* 'throttling' [R/W] */
	entry = create_proc_entry(ACPI_PROCESSOR_FILE_THROTTLING,
				  S_IFREG | S_IRUGO | S_IWUSR,
				  acpi_device_dir(device));
	if (!entry)
		return -EIO;
	else {
		entry->proc_fops = &acpi_processor_throttling_fops;
		entry->data = acpi_driver_data(device);
		entry->owner = THIS_MODULE;
	}

	/* 'limit' [R/W] */
	entry = create_proc_entry(ACPI_PROCESSOR_FILE_LIMIT,
				  S_IFREG | S_IRUGO | S_IWUSR,
				  acpi_device_dir(device));
	if (!entry)
		return -EIO;
	else {
		entry->proc_fops = &acpi_processor_limit_fops;
		entry->data = acpi_driver_data(device);
		entry->owner = THIS_MODULE;
	}

	return 0;
}

static int acpi_processor_remove_fs(struct acpi_device *device)
{

	if (acpi_device_dir(device)) {
		remove_proc_entry(ACPI_PROCESSOR_FILE_INFO,
				  acpi_device_dir(device));
		remove_proc_entry(ACPI_PROCESSOR_FILE_THROTTLING,
				  acpi_device_dir(device));
		remove_proc_entry(ACPI_PROCESSOR_FILE_LIMIT,
				  acpi_device_dir(device));
		remove_proc_entry(acpi_device_bid(device), acpi_processor_dir);
		acpi_device_dir(device) = NULL;
	}

	return 0;
}

/* Use the acpiid in MADT to map cpus in case of SMP */

#ifndef CONFIG_SMP
static int get_cpu_id(acpi_handle handle, u32 acpi_id) { return -1; }
#else

static struct acpi_table_madt *madt;

static int map_lapic_id(struct acpi_subtable_header *entry,
			u32 acpi_id, int *apic_id)
{
	struct acpi_madt_local_apic *lapic =
		(struct acpi_madt_local_apic *)entry;
	if ((lapic->lapic_flags & ACPI_MADT_ENABLED) &&
	    lapic->processor_id == acpi_id) {
		*apic_id = lapic->id;
		return 1;
	}
	return 0;
}

static int map_lsapic_id(struct acpi_subtable_header *entry,
			 u32 acpi_id, int *apic_id)
{
	struct acpi_madt_local_sapic *lsapic =
		(struct acpi_madt_local_sapic *)entry;
	/* Only check enabled APICs */
	if (lsapic->lapic_flags & ACPI_MADT_ENABLED) {
		/* First check against id */
		if (lsapic->processor_id == acpi_id) {
			*apic_id = (lsapic->id << 8) | lsapic->eid;
			return 1;
		/* Check against optional uid */
		} else if (entry->length >= 16 &&
			   lsapic->uid == acpi_id) {
			*apic_id = lsapic->uid;
			return 1;
		}
	}
	return 0;
}

static int map_madt_entry(u32 acpi_id)
{
	unsigned long madt_end, entry;
	int apic_id = -1;

	if (!madt)
		return apic_id;

	entry = (unsigned long)madt;
	madt_end = entry + madt->header.length;

	/* Parse all entries looking for a match.
	 */

	entry += sizeof(struct acpi_table_madt);
	while (entry + sizeof(struct acpi_subtable_header) < madt_end) {
		struct acpi_subtable_header *header =
			(struct acpi_subtable_header *)entry;
		if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) {
			if (map_lapic_id(header, acpi_id, &apic_id))
				break;
		} else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) {
			if (map_lsapic_id(header, acpi_id, &apic_id))
				break;
		}
		entry += header->length;
	}
	return apic_id;
}

static int map_mat_entry(acpi_handle handle, u32 acpi_id)
{
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *obj;
	struct acpi_subtable_header *header;
	int apic_id = -1;

	if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
		goto exit;

	if (!buffer.length || !buffer.pointer)
		goto exit;

	obj = buffer.pointer;
	if (obj->type != ACPI_TYPE_BUFFER ||
	    obj->buffer.length < sizeof(struct acpi_subtable_header)) {
		goto exit;
	}

	header = (struct acpi_subtable_header *)obj->buffer.pointer;
	if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) {
		map_lapic_id(header, acpi_id, &apic_id);
	} else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) {
		map_lsapic_id(header, acpi_id, &apic_id);
	}

      exit:
	if (buffer.pointer)
		kfree(buffer.pointer);
	return apic_id;
}

static int get_cpu_id(acpi_handle handle, u32 acpi_id)
{
	int i;
	int apic_id = -1;

	apic_id = map_mat_entry(handle, acpi_id);
	if (apic_id == -1)
		apic_id = map_madt_entry(acpi_id);
	if (apic_id == -1)
		return apic_id;

	for_each_possible_cpu(i) {
		if (cpu_physical_id(i) == apic_id)
			return i;
	}
	return -1;
}
#endif

/* --------------------------------------------------------------------------
                                Driver Interface
   -------------------------------------------------------------------------- */

static int acpi_processor_get_info(struct acpi_processor *pr, unsigned has_uid)
{
	acpi_status status = 0;
	union acpi_object object = { 0 };
	struct acpi_buffer buffer = { sizeof(union acpi_object), &object };
	int cpu_index;
	static int cpu0_initialized;


	if (!pr)
		return -EINVAL;

	if (num_online_cpus() > 1)
		errata.smp = TRUE;

	acpi_processor_errata(pr);

	/*
	 * Check to see if we have bus mastering arbitration control.  This
	 * is required for proper C3 usage (to maintain cache coherency).
	 */
	if (acpi_gbl_FADT.pm2_control_block && acpi_gbl_FADT.pm2_control_length) {
		pr->flags.bm_control = 1;
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "Bus mastering arbitration control present\n"));
	} else
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "No bus mastering arbitration control\n"));

	/* Check if it is a Device with HID and UID */
	if (has_uid) {
		unsigned long value;
		status = acpi_evaluate_integer(pr->handle, METHOD_NAME__UID,
					       NULL, &value);
		if (ACPI_FAILURE(status)) {
			printk(KERN_ERR PREFIX "Evaluating processor _UID\n");
			return -ENODEV;
		}
		pr->acpi_id = value;
	} else {
		/*
		 * Evaluate the processor object.  Note that it is common on SMP to
		 * have the first (boot) processor with a valid PBLK address while
		 * all others have a NULL address.
		 */
		status = acpi_evaluate_object(pr->handle, NULL, NULL, &buffer);
		if (ACPI_FAILURE(status)) {
			printk(KERN_ERR PREFIX "Evaluating processor object\n");
			return -ENODEV;
		}

		/*
		 * TBD: Synch processor ID (via LAPIC/LSAPIC structures) on SMP.
		 *      >>> 'acpi_get_processor_id(acpi_id, &id)' in arch/xxx/acpi.c
		 */
		pr->acpi_id = object.processor.proc_id;
	}
	cpu_index = get_cpu_id(pr->handle, pr->acpi_id);

	/* Handle UP system running SMP kernel, with no LAPIC in MADT */
	if (!cpu0_initialized && (cpu_index == -1) &&
	    (num_online_cpus() == 1)) {
		cpu_index = 0;
	}

	cpu0_initialized = 1;

	pr->id = cpu_index;

	/*
	 * Extra Processor objects may be enumerated on MP systems with
	 * less than the max # of CPUs. They should be ignored _iff
	 * they are physically not present.
	 */
	if (pr->id == -1) {
		if (ACPI_FAILURE
		    (acpi_processor_hotadd_init(pr->handle, &pr->id))) {
			return -ENODEV;
		}
	}

	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Processor [%d:%d]\n", pr->id,
			  pr->acpi_id));

	if (!object.processor.pblk_address)
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No PBLK (NULL address)\n"));
	else if (object.processor.pblk_length != 6)
		printk(KERN_ERR PREFIX "Invalid PBLK length [%d]\n",
		       object.processor.pblk_length);
	else {
		pr->throttling.address = object.processor.pblk_address;
		pr->throttling.duty_offset = acpi_gbl_FADT.duty_offset;
		pr->throttling.duty_width = acpi_gbl_FADT.duty_width;

		pr->pblk = object.processor.pblk_address;

		/*
		 * We don't care about error returns - we just try to mark
		 * these reserved so that nobody else is confused into thinking
		 * that this region might be unused..
		 *
		 * (In particular, allocating the IO range for Cardbus)
		 */
		request_region(pr->throttling.address, 6, "ACPI CPU throttle");
	}

	return 0;
}

static void *processor_device_array[NR_CPUS];

static int __cpuinit acpi_processor_start(struct acpi_device *device)
{
	int result = 0;
	acpi_status status = AE_OK;
	struct acpi_processor *pr;


	pr = acpi_driver_data(device);

	result = acpi_processor_get_info(pr, device->flags.unique_id);
	if (result) {
		/* Processor is physically not present */
		return 0;
	}

	BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));

	/*
	 * Buggy BIOS check
	 * ACPI id of processors can be reported wrongly by the BIOS.
	 * Don't trust it blindly
	 */
	if (processor_device_array[pr->id] != NULL &&
	    processor_device_array[pr->id] != device) {
		printk(KERN_WARNING "BIOS reported wrong ACPI id "
		       "for the processor\n");
		return -ENODEV;
	}
	processor_device_array[pr->id] = device;

	processors[pr->id] = pr;

	result = acpi_processor_add_fs(device);
	if (result)
		goto end;

	status = acpi_install_notify_handler(pr->handle, ACPI_DEVICE_NOTIFY,
					     acpi_processor_notify, pr);

	/* _PDC call should be done before doing anything else (if reqd.).
	 */
	arch_acpi_processor_init_pdc(pr);
	acpi_processor_set_pdc(pr);
#ifdef CONFIG_CPU_FREQ
	acpi_processor_ppc_has_changed(pr);
#endif
	acpi_processor_get_throttling_info(pr);
	acpi_processor_get_limit_info(pr);


	acpi_processor_power_init(pr, device);

	if (pr->flags.throttling) {
		printk(KERN_INFO PREFIX "%s [%s] (supports",
		       acpi_device_name(device), acpi_device_bid(device));
		printk(" %d throttling states", pr->throttling.state_count);
		printk(")\n");
	}

      end:

	return result;
}

static void acpi_processor_notify(acpi_handle handle, u32 event, void *data)
{
	struct acpi_processor *pr = data;
	struct acpi_device *device = NULL;
	int saved;

	if (!pr)
		return;

	if (acpi_bus_get_device(pr->handle, &device))
		return;

	switch (event) {
	case ACPI_PROCESSOR_NOTIFY_PERFORMANCE:
		saved = pr->performance_platform_limit;
		acpi_processor_ppc_has_changed(pr);
		if (saved == pr->performance_platform_limit)
			break;
		acpi_bus_generate_proc_event(device, event,
					     pr->performance_platform_limit);
		acpi_bus_generate_netlink_event(device->pnp.device_class,
						device->dev.bus_id, event,
						pr->performance_platform_limit);
		break;
	case ACPI_PROCESSOR_NOTIFY_POWER:
		acpi_processor_cst_has_changed(pr);
		acpi_bus_generate_proc_event(device, event, 0);
		acpi_bus_generate_netlink_event(device->pnp.device_class,
						device->dev.bus_id, event, 0);
		break;
	case ACPI_PROCESSOR_NOTIFY_THROTTLING:
		acpi_processor_tstate_has_changed(pr);
		acpi_bus_generate_proc_event(device, event, 0);
		acpi_bus_generate_netlink_event(device->pnp.device_class,
						device->dev.bus_id, event, 0);
	default:
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "Unsupported event [0x%x]\n", event));
		break;
	}

	return;
}

static int acpi_cpu_soft_notify(struct notifier_block *nfb,
				unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct acpi_processor *pr = processors[cpu];

	if (action == CPU_ONLINE && pr) {
		acpi_processor_ppc_has_changed(pr);
		acpi_processor_cst_has_changed(pr);
		acpi_processor_tstate_has_changed(pr);
	}
	return NOTIFY_OK;
}

static struct notifier_block acpi_cpu_notifier = {
	.notifier_call = acpi_cpu_soft_notify,
};

static int acpi_processor_add(struct acpi_device *device)
{
	struct acpi_processor *pr = NULL;


	if (!device)
		return -EINVAL;

	pr = kzalloc(sizeof(struct acpi_processor), GFP_KERNEL);
	if (!pr)
		return -ENOMEM;

	pr->handle = device->handle;
	strcpy(acpi_device_name(device), ACPI_PROCESSOR_DEVICE_NAME);
	strcpy(acpi_device_class(device), ACPI_PROCESSOR_CLASS);
	acpi_driver_data(device) = pr;

	return 0;
}

static int acpi_processor_remove(struct acpi_device *device, int type)
{
	acpi_status status = AE_OK;
	struct acpi_processor *pr = NULL;


	if (!device || !acpi_driver_data(device))
		return -EINVAL;

	pr = acpi_driver_data(device);

	if (pr->id >= nr_cpu_ids) {
		kfree(pr);
		return 0;
	}

	if (type == ACPI_BUS_REMOVAL_EJECT) {
		if (acpi_processor_handle_eject(pr))
			return -EINVAL;
	}

	acpi_processor_power_exit(pr, device);

	status = acpi_remove_notify_handler(pr->handle, ACPI_DEVICE_NOTIFY,
					    acpi_processor_notify);

	acpi_processor_remove_fs(device);

	processors[pr->id] = NULL;
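	/*
	 * The cached processors[] slot is cleared above before 'pr' is
	 * freed below, so later per-CPU lookups cannot see a stale pointer.
	 */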

	kfree(pr);

	return 0;
}

#ifdef CONFIG_ACPI_HOTPLUG_CPU
/****************************************************************************
 *	Acpi processor hotplug support				            *
 ****************************************************************************/

static int is_processor_present(acpi_handle handle);

static int is_processor_present(acpi_handle handle)
{
	acpi_status status;
	unsigned long sta = 0;


	status = acpi_evaluate_integer(handle, "_STA", NULL, &sta);
	if (ACPI_FAILURE(status) || !(sta & ACPI_STA_DEVICE_PRESENT)) {
		ACPI_EXCEPTION((AE_INFO, status, "Processor Device is not present"));
		return 0;
	}
	return 1;
}

static
int acpi_processor_device_add(acpi_handle handle, struct acpi_device **device)
{
	acpi_handle phandle;
	struct acpi_device *pdev;
	struct acpi_processor *pr;


	if (acpi_get_parent(handle, &phandle)) {
		return -ENODEV;
	}

	if (acpi_bus_get_device(phandle, &pdev)) {
		return -ENODEV;
	}

	if (acpi_bus_add(device, pdev, handle, ACPI_BUS_TYPE_PROCESSOR)) {
		return -ENODEV;
	}

	acpi_bus_start(*device);

	pr = acpi_driver_data(*device);
	if (!pr)
		return -ENODEV;

	if ((pr->id >= 0) && (pr->id < nr_cpu_ids)) {
		kobject_uevent(&(*device)->dev.kobj, KOBJ_ONLINE);
	}
	return 0;
}

static void
acpi_processor_hotplug_notify(acpi_handle handle, u32 event, void *data)
{
	struct acpi_processor *pr;
	struct acpi_device *device = NULL;
	int result;


	switch (event) {
	case ACPI_NOTIFY_BUS_CHECK:
	case ACPI_NOTIFY_DEVICE_CHECK:
		printk("Processor driver received %s event\n",
		       (event == ACPI_NOTIFY_BUS_CHECK) ?
		       "ACPI_NOTIFY_BUS_CHECK" : "ACPI_NOTIFY_DEVICE_CHECK");

		if (!is_processor_present(handle))
			break;

		if (acpi_bus_get_device(handle, &device)) {
			result = acpi_processor_device_add(handle, &device);
			if (result)
				printk(KERN_ERR PREFIX
				       "Unable to add the device\n");
			break;
		}

		pr = acpi_driver_data(device);
		if (!pr) {
			printk(KERN_ERR PREFIX "Driver data is NULL\n");
			break;
		}

		if (pr->id >= 0 && (pr->id < nr_cpu_ids)) {
			kobject_uevent(&device->dev.kobj, KOBJ_OFFLINE);
			break;
		}

		result = acpi_processor_start(device);
		if ((!result) && ((pr->id >= 0) && (pr->id < nr_cpu_ids))) {
			kobject_uevent(&device->dev.kobj, KOBJ_ONLINE);
		} else {
			printk(KERN_ERR PREFIX "Device [%s] failed to start\n",
			       acpi_device_bid(device));
		}
		break;
	case ACPI_NOTIFY_EJECT_REQUEST:
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "received ACPI_NOTIFY_EJECT_REQUEST\n"));

		if (acpi_bus_get_device(handle, &device)) {
			printk(KERN_ERR PREFIX
			       "Device don't exist, dropping EJECT\n");
			break;
		}
		pr = acpi_driver_data(device);
		if (!pr) {
			printk(KERN_ERR PREFIX
			       "Driver data is NULL, dropping EJECT\n");
			return;
		}

		if ((pr->id < nr_cpu_ids) && (cpu_present(pr->id)))
			kobject_uevent(&device->dev.kobj, KOBJ_OFFLINE);
		break;
	default:
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "Unsupported event [0x%x]\n", event));
		break;
	}

	return;
}

static acpi_status
processor_walk_namespace_cb(acpi_handle handle,
			    u32 lvl, void *context, void **rv)
{
	acpi_status status;
	int *action = context;
	acpi_object_type type = 0;

	status = acpi_get_type(handle, &type);
	if (ACPI_FAILURE(status))
		return (AE_OK);

	if (type != ACPI_TYPE_PROCESSOR)
		return (AE_OK);

	switch (*action) {
	case INSTALL_NOTIFY_HANDLER:
		acpi_install_notify_handler(handle,
					    ACPI_SYSTEM_NOTIFY,
					    acpi_processor_hotplug_notify,
					    NULL);
		break;
	case UNINSTALL_NOTIFY_HANDLER:
		acpi_remove_notify_handler(handle,
					   ACPI_SYSTEM_NOTIFY,
					   acpi_processor_hotplug_notify);
		break;
	default:
		break;
	}

	return (AE_OK);
}

static acpi_status acpi_processor_hotadd_init(acpi_handle handle, int *p_cpu)
{

	if (!is_processor_present(handle)) {
		return AE_ERROR;
	}

	if (acpi_map_lsapic(handle, p_cpu))
		return AE_ERROR;

	if (arch_register_cpu(*p_cpu)) {
		acpi_unmap_lsapic(*p_cpu);
		return AE_ERROR;
	}

	return AE_OK;
}

static int acpi_processor_handle_eject(struct acpi_processor *pr)
{
	if (cpu_online(pr->id)) {
		return (-EINVAL);
	}
	arch_unregister_cpu(pr->id);
	acpi_unmap_lsapic(pr->id);
	return (0);
}
#else
static acpi_status acpi_processor_hotadd_init(acpi_handle handle, int *p_cpu)
{
	return AE_ERROR;
}
static int acpi_processor_handle_eject(struct acpi_processor *pr)
{
	return (-EINVAL);
}
#endif

static
void acpi_processor_install_hotplug_notify(void)
{
#ifdef CONFIG_ACPI_HOTPLUG_CPU
	int action = INSTALL_NOTIFY_HANDLER;
	acpi_walk_namespace(ACPI_TYPE_PROCESSOR,
			    ACPI_ROOT_OBJECT,
			    ACPI_UINT32_MAX,
			    processor_walk_namespace_cb, &action, NULL);
#endif
	register_hotcpu_notifier(&acpi_cpu_notifier);
}

static
void acpi_processor_uninstall_hotplug_notify(void)
{
#ifdef CONFIG_ACPI_HOTPLUG_CPU
	int action = UNINSTALL_NOTIFY_HANDLER;
	acpi_walk_namespace(ACPI_TYPE_PROCESSOR,
			    ACPI_ROOT_OBJECT,
			    ACPI_UINT32_MAX,
			    processor_walk_namespace_cb, &action, NULL);
#endif
	unregister_hotcpu_notifier(&acpi_cpu_notifier);
}

/*
 * We keep the driver loaded even when ACPI is not running.
 * This is needed for the powernow-k8 driver, that works even without
 * ACPI, but needs symbols from this driver
 */

static int __init acpi_processor_init(void)
{
	int result = 0;


	memset(&processors, 0, sizeof(processors));
	memset(&errata, 0, sizeof(errata));

#ifdef CONFIG_SMP
	if (ACPI_FAILURE(acpi_get_table(ACPI_SIG_MADT, 0,
				(struct acpi_table_header **)&madt)))
		madt = NULL;
#endif

	acpi_processor_dir = proc_mkdir(ACPI_PROCESSOR_CLASS, acpi_root_dir);
	if (!acpi_processor_dir)
		return -ENOMEM;
	acpi_processor_dir->owner = THIS_MODULE;

	result = cpuidle_register_driver(&acpi_idle_driver);
	if (result < 0)
		goto out_proc;

	result = acpi_bus_register_driver(&acpi_processor_driver);
	if (result < 0)
		goto out_cpuidle;

	acpi_processor_install_hotplug_notify();

	acpi_thermal_cpufreq_init();

	acpi_processor_ppc_init();

	return 0;

out_cpuidle:
	cpuidle_unregister_driver(&acpi_idle_driver);

out_proc:
	remove_proc_entry(ACPI_PROCESSOR_CLASS, acpi_root_dir);

	return result;
}

static void __exit acpi_processor_exit(void)
{
	acpi_processor_ppc_exit();

	acpi_thermal_cpufreq_exit();

	acpi_processor_uninstall_hotplug_notify();

	acpi_bus_unregister_driver(&acpi_processor_driver);

	cpuidle_unregister_driver(&acpi_idle_driver);

	remove_proc_entry(ACPI_PROCESSOR_CLASS, acpi_root_dir);

	return;
}

module_init(acpi_processor_init);
module_exit(acpi_processor_exit);

EXPORT_SYMBOL(acpi_processor_set_thermal_limit);

MODULE_ALIAS("processor");