/*
 * processor_perflib.c - ACPI Processor P-States Library ($Revision: 71 $)
 *
 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 * Copyright (C) 2004 Dominik Brodowski <linux@brodo.de>
 * Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *			- Added processor hotplug support
 *
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or (at
 *  your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful, but
 *  WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>

#ifdef CONFIG_X86_ACPI_CPUFREQ_PROC_INTF
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <asm/uaccess.h>
#endif

#include <acpi/acpi_bus.h>
#include <acpi/processor.h>

#define ACPI_PROCESSOR_COMPONENT	0x01000000
#define ACPI_PROCESSOR_CLASS		"processor"
#define ACPI_PROCESSOR_DRIVER_NAME	"ACPI Processor Driver"
#define ACPI_PROCESSOR_FILE_PERFORMANCE	"performance"
#define _COMPONENT		ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("acpi_processor")

static DECLARE_MUTEX(performance_sem);

/*
 * _PPC support is implemented as a CPUfreq policy notifier:
 * each time a CPUfreq driver that is also registered with the ACPI
 * core is asked to change the speed policy, the maximum frequency is
 * clipped so that it stays within the platform limit.
 *
 * Also, when a new platform limit value is detected, the CPUfreq
 * policy is adjusted accordingly.
 */

#define PPC_REGISTERED   1
#define PPC_IN_USE       2

static int acpi_processor_ppc_status = 0;

static int acpi_processor_ppc_notifier(struct notifier_block *nb,
				       unsigned long event, void *data)
{
	struct cpufreq_policy *policy = data;
	struct acpi_processor *pr;
	unsigned int ppc = 0;

	down(&performance_sem);

	if (event != CPUFREQ_INCOMPATIBLE)
		goto out;

	pr = processors[policy->cpu];
	if (!pr || !pr->performance)
		goto out;

	ppc = (unsigned int)pr->performance_platform_limit;
	if (!ppc)
		goto out;

	if (ppc >= pr->performance->state_count)
		goto out;

	cpufreq_verify_within_limits(policy, 0,
				     pr->performance->states[ppc].
				     core_frequency * 1000);

out:
	up(&performance_sem);

	return 0;
}

static struct notifier_block acpi_ppc_notifier_block = {
	.notifier_call = acpi_processor_ppc_notifier,
};

static int acpi_processor_get_platform_limit(struct acpi_processor *pr)
{
	acpi_status status = 0;
	unsigned long ppc = 0;

	ACPI_FUNCTION_TRACE("acpi_processor_get_platform_limit");

	if (!pr)
		return_VALUE(-EINVAL);

	/*
	 * _PPC indicates the maximum state currently supported by the platform
	 * (e.g. 0 = states 0..n; 1 = states 1..n; etc.).
	 */
	status = acpi_evaluate_integer(pr->handle, "_PPC", NULL, &ppc);

	if (status != AE_NOT_FOUND)
		acpi_processor_ppc_status |= PPC_IN_USE;

	if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
		ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Error evaluating _PPC\n"));
		return_VALUE(-ENODEV);
	}

	pr->performance_platform_limit = (int)ppc;

	return_VALUE(0);
}

int acpi_processor_ppc_has_changed(struct acpi_processor *pr)
{
	int ret = acpi_processor_get_platform_limit(pr);
	if (ret < 0)
		return (ret);
	else
		return cpufreq_update_policy(pr->id);
}

void acpi_processor_ppc_init(void)
{
	if (!cpufreq_register_notifier
	    (&acpi_ppc_notifier_block, CPUFREQ_POLICY_NOTIFIER))
		acpi_processor_ppc_status |= PPC_REGISTERED;
	else
		printk(KERN_DEBUG
		       "Warning: Processor Platform Limit not supported.\n");
}

void acpi_processor_ppc_exit(void)
{
	if (acpi_processor_ppc_status & PPC_REGISTERED)
		cpufreq_unregister_notifier(&acpi_ppc_notifier_block,
					    CPUFREQ_POLICY_NOTIFIER);

	acpi_processor_ppc_status &= ~PPC_REGISTERED;
}

static int acpi_processor_get_performance_control(struct acpi_processor *pr)
{
	int result = 0;
	acpi_status status = 0;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *pct = NULL;
	union acpi_object obj = { 0 };

	ACPI_FUNCTION_TRACE("acpi_processor_get_performance_control");

	status = acpi_evaluate_object(pr->handle, "_PCT", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Error evaluating _PCT\n"));
		return_VALUE(-ENODEV);
	}

	pct = (union acpi_object *)buffer.pointer;
	if (!pct || (pct->type != ACPI_TYPE_PACKAGE)
	    || (pct->package.count != 2)) {
		ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _PCT data\n"));
		result = -EFAULT;
		goto end;
	}

	/*
	 * control_register
	 */

	obj = pct->package.elements[0];

	if ((obj.type != ACPI_TYPE_BUFFER)
	    || (obj.buffer.length < sizeof(struct acpi_pct_register))
	    || (obj.buffer.pointer == NULL)) {
		ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
				  "Invalid _PCT data (control_register)\n"));
		result = -EFAULT;
		goto end;
	}
	memcpy(&pr->performance->control_register, obj.buffer.pointer,
	       sizeof(struct acpi_pct_register));

	/*
	 * status_register
	 */

	obj = pct->package.elements[1];

	if ((obj.type != ACPI_TYPE_BUFFER)
	    || (obj.buffer.length < sizeof(struct acpi_pct_register))
	    || (obj.buffer.pointer == NULL)) {
		ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
				  "Invalid _PCT data (status_register)\n"));
		result = -EFAULT;
		goto end;
	}

	memcpy(&pr->performance->status_register, obj.buffer.pointer,
	       sizeof(struct acpi_pct_register));

end:
	acpi_os_free(buffer.pointer);

	return_VALUE(result);
}
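
/*
 * _PSS returns a package of Px state entries.  Each entry is itself a
 * package of six integers (hence the "NNNNNN" extraction format below):
 * core_frequency [MHz], power [mW], transition_latency [us],
 * bus_master_latency [us], and the control/status values to be written
 * to and read back from the _PCT registers.
 */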
static int acpi_processor_get_performance_states(struct acpi_processor *pr)
{
	int result = 0;
	acpi_status status = AE_OK;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	struct acpi_buffer format = { sizeof("NNNNNN"), "NNNNNN" };
	struct acpi_buffer state = { 0, NULL };
	union acpi_object *pss = NULL;
	int i;

	ACPI_FUNCTION_TRACE("acpi_processor_get_performance_states");

	status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Error evaluating _PSS\n"));
		return_VALUE(-ENODEV);
	}

	pss = (union acpi_object *)buffer.pointer;
	if (!pss || (pss->type != ACPI_TYPE_PACKAGE)) {
		ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _PSS data\n"));
		result = -EFAULT;
		goto end;
	}

	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d performance states\n",
			  pss->package.count));

	pr->performance->state_count = pss->package.count;
	pr->performance->states =
	    kmalloc(sizeof(struct acpi_processor_px) * pss->package.count,
		    GFP_KERNEL);
	if (!pr->performance->states) {
		result = -ENOMEM;
		goto end;
	}

	for (i = 0; i < pr->performance->state_count; i++) {

		struct acpi_processor_px *px = &(pr->performance->states[i]);

		state.length = sizeof(struct acpi_processor_px);
		state.pointer = px;

		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Extracting state %d\n", i));

		status = acpi_extract_package(&(pss->package.elements[i]),
					      &format, &state);
		if (ACPI_FAILURE(status)) {
			ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
					  "Invalid _PSS data\n"));
			result = -EFAULT;
			kfree(pr->performance->states);
			goto end;
		}

		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "State [%d]: core_frequency[%d] power[%d] transition_latency[%d] bus_master_latency[%d] control[0x%x] status[0x%x]\n",
				  i,
				  (u32) px->core_frequency,
				  (u32) px->power,
				  (u32) px->transition_latency,
				  (u32) px->bus_master_latency,
				  (u32) px->control, (u32) px->status));

		if (!px->core_frequency) {
			ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
					  "Invalid _PSS data: freq is zero\n"));
			result = -EFAULT;
			kfree(pr->performance->states);
			goto end;
		}
	}

end:
	acpi_os_free(buffer.pointer);

	return_VALUE(result);
}

static int acpi_processor_get_performance_info(struct acpi_processor *pr)
{
	int result = 0;
	acpi_status status = AE_OK;
	acpi_handle handle = NULL;

	ACPI_FUNCTION_TRACE("acpi_processor_get_performance_info");

	if (!pr || !pr->performance || !pr->handle)
		return_VALUE(-EINVAL);

	status = acpi_get_handle(pr->handle, "_PCT", &handle);
	if (ACPI_FAILURE(status)) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "ACPI-based processor performance control unavailable\n"));
		return_VALUE(-ENODEV);
	}

	result = acpi_processor_get_performance_control(pr);
	if (result)
		return_VALUE(result);

	result = acpi_processor_get_performance_states(pr);
	if (result)
		return_VALUE(result);

	result = acpi_processor_get_platform_limit(pr);
	if (result)
		return_VALUE(result);

	return_VALUE(0);
}
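
/*
 * acpi_processor_notify_smm() hands P-state control over to the OS:
 * it writes the FADT pstate_cnt value to the SMI command port so that
 * the platform firmware (SMM) stops managing performance states itself.
 * cpufreq drivers are expected to call this once before they start
 * switching P-states.
 */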
int acpi_processor_notify_smm(struct module *calling_module)
{
	acpi_status status;
	static int is_done = 0;

	ACPI_FUNCTION_TRACE("acpi_processor_notify_smm");

	if (!(acpi_processor_ppc_status & PPC_REGISTERED))
		return_VALUE(-EBUSY);

	if (!try_module_get(calling_module))
		return_VALUE(-EINVAL);

	/* is_done is set to negative if an error occurred,
	 * and to positive if _no_ error occurred, but SMM
	 * was already notified. This avoids double notification
	 * which might lead to unexpected results...
	 */
	if (is_done > 0) {
		module_put(calling_module);
		return_VALUE(0);
	} else if (is_done < 0) {
		module_put(calling_module);
		return_VALUE(is_done);
	}

	is_done = -EIO;

	/* Can't write pstate_cnt to smi_cmd if either value is zero */
	if ((!acpi_fadt.smi_cmd) || (!acpi_fadt.pstate_cnt)) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No SMI port or pstate_cnt\n"));
		module_put(calling_module);
		return_VALUE(0);
	}

	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			  "Writing pstate_cnt [0x%x] to smi_cmd [0x%x]\n",
			  acpi_fadt.pstate_cnt, acpi_fadt.smi_cmd));

	/* FADT v1 doesn't support pstate_cnt, but many BIOS vendors use
	 * it anyway, so we need to support it... */
	if (acpi_fadt_is_v1) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "Using v1.0 FADT reserved value for pstate_cnt\n"));
	}

	status = acpi_os_write_port(acpi_fadt.smi_cmd,
				    (u32) acpi_fadt.pstate_cnt, 8);
	if (ACPI_FAILURE(status)) {
		ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
				  "Failed to write pstate_cnt [0x%x] to "
				  "smi_cmd [0x%x]\n", acpi_fadt.pstate_cnt,
				  acpi_fadt.smi_cmd));
		module_put(calling_module);
		return_VALUE(status);
	}

	/* Success. If there's no _PPC, we don't need to hold a reference
	 * on the calling module, so the cpufreq driver may be rmmod'ed. */
	is_done = 1;

	if (!(acpi_processor_ppc_status & PPC_IN_USE))
		module_put(calling_module);

	return_VALUE(0);
}

EXPORT_SYMBOL(acpi_processor_notify_smm);

#ifdef CONFIG_X86_ACPI_CPUFREQ_PROC_INTF
/* /proc/acpi/processor/../performance interface (DEPRECATED) */

static int acpi_processor_perf_open_fs(struct inode *inode, struct file *file);
static struct file_operations acpi_processor_perf_fops = {
	.open = acpi_processor_perf_open_fs,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int acpi_processor_perf_seq_show(struct seq_file *seq, void *offset)
{
	struct acpi_processor *pr = (struct acpi_processor *)seq->private;
	int i;

	ACPI_FUNCTION_TRACE("acpi_processor_perf_seq_show");

	if (!pr)
		goto end;

	if (!pr->performance) {
		seq_puts(seq, "<not supported>\n");
		goto end;
	}

	seq_printf(seq, "state count: %d\n"
		   "active state: P%d\n",
		   pr->performance->state_count, pr->performance->state);

	seq_puts(seq, "states:\n");
	for (i = 0; i < pr->performance->state_count; i++)
		seq_printf(seq,
			   " %cP%d: %d MHz, %d mW, %d uS\n",
			   (i == pr->performance->state ?
			    '*' : ' '), i,
			   (u32) pr->performance->states[i].core_frequency,
			   (u32) pr->performance->states[i].power,
			   (u32) pr->performance->states[i].transition_latency);

end:
	return_VALUE(0);
}

static int acpi_processor_perf_open_fs(struct inode *inode, struct file *file)
{
	return single_open(file, acpi_processor_perf_seq_show,
			   PDE(inode)->data);
}

static ssize_t
acpi_processor_write_performance(struct file *file,
				 const char __user * buffer,
				 size_t count, loff_t * data)
{
	int result = 0;
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct acpi_processor *pr = (struct acpi_processor *)m->private;
	struct acpi_processor_performance *perf;
	char state_string[12] = { '\0' };
	unsigned int new_state = 0;
	struct cpufreq_policy policy;

	ACPI_FUNCTION_TRACE("acpi_processor_write_performance");

	if (!pr || (count > sizeof(state_string) - 1))
		return_VALUE(-EINVAL);

	perf = pr->performance;
	if (!perf)
		return_VALUE(-EINVAL);

	if (copy_from_user(state_string, buffer, count))
		return_VALUE(-EFAULT);

	state_string[count] = '\0';
	new_state = simple_strtoul(state_string, NULL, 0);

	if (new_state >= perf->state_count)
		return_VALUE(-EINVAL);

	cpufreq_get_policy(&policy, pr->id);

	policy.cpu = pr->id;
	policy.min = perf->states[new_state].core_frequency * 1000;
	policy.max = perf->states[new_state].core_frequency * 1000;

	result = cpufreq_set_policy(&policy);
	if (result)
		return_VALUE(result);

	return_VALUE(count);
}

static void acpi_cpufreq_add_file(struct acpi_processor *pr)
{
	struct proc_dir_entry *entry = NULL;
	struct acpi_device *device = NULL;

	ACPI_FUNCTION_TRACE("acpi_cpufreq_add_file");

	if (acpi_bus_get_device(pr->handle, &device))
		return_VOID;

	/* add file 'performance' [R/W] */
	entry = create_proc_entry(ACPI_PROCESSOR_FILE_PERFORMANCE,
				  S_IFREG | S_IRUGO | S_IWUSR,
				  acpi_device_dir(device));
	if (!entry)
		ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
				  "Unable to create '%s' fs entry\n",
				  ACPI_PROCESSOR_FILE_PERFORMANCE));
	else {
		acpi_processor_perf_fops.write = acpi_processor_write_performance;
		entry->proc_fops = &acpi_processor_perf_fops;
		entry->data = acpi_driver_data(device);
		entry->owner = THIS_MODULE;
	}
	return_VOID;
}

static void acpi_cpufreq_remove_file(struct acpi_processor *pr)
{
	struct acpi_device *device = NULL;

	ACPI_FUNCTION_TRACE("acpi_cpufreq_remove_file");

	if (acpi_bus_get_device(pr->handle, &device))
		return_VOID;

	/* remove file 'performance' */
	remove_proc_entry(ACPI_PROCESSOR_FILE_PERFORMANCE,
			  acpi_device_dir(device));

	return_VOID;
}

#else
static void acpi_cpufreq_add_file(struct acpi_processor *pr)
{
	return;
}
static void acpi_cpufreq_remove_file(struct acpi_processor *pr)
{
	return;
}
#endif				/* CONFIG_X86_ACPI_CPUFREQ_PROC_INTF */
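
/*
 * A cpufreq driver hands in a per-CPU acpi_processor_performance structure;
 * the library fills it from _PCT/_PSS/_PPC and, if enabled, creates the
 * deprecated /proc interface.  Only one driver may be registered per CPU
 * at a time; acpi_processor_unregister_performance() releases the slot.
 */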
int
acpi_processor_register_performance(struct acpi_processor_performance
				    *performance, unsigned int cpu)
{
	struct acpi_processor *pr;

	ACPI_FUNCTION_TRACE("acpi_processor_register_performance");

	if (!(acpi_processor_ppc_status & PPC_REGISTERED))
		return_VALUE(-EINVAL);

	down(&performance_sem);

	pr = processors[cpu];
	if (!pr) {
		up(&performance_sem);
		return_VALUE(-ENODEV);
	}

	if (pr->performance) {
		up(&performance_sem);
		return_VALUE(-EBUSY);
	}

	pr->performance = performance;

	if (acpi_processor_get_performance_info(pr)) {
		pr->performance = NULL;
		up(&performance_sem);
		return_VALUE(-EIO);
	}

	acpi_cpufreq_add_file(pr);

	up(&performance_sem);
	return_VALUE(0);
}

EXPORT_SYMBOL(acpi_processor_register_performance);

void
acpi_processor_unregister_performance(struct acpi_processor_performance
				      *performance, unsigned int cpu)
{
	struct acpi_processor *pr;

	ACPI_FUNCTION_TRACE("acpi_processor_unregister_performance");

	down(&performance_sem);

	pr = processors[cpu];
	if (!pr) {
		up(&performance_sem);
		return_VOID;
	}

	kfree(pr->performance->states);
	pr->performance = NULL;

	acpi_cpufreq_remove_file(pr);

	up(&performance_sem);

	return_VOID;
}

EXPORT_SYMBOL(acpi_processor_unregister_performance);
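
/*
 * Illustrative sketch only, not part of this file: the calling sequence a
 * cpufreq driver is expected to follow.  The "example_*" names are
 * hypothetical.
 *
 *	static struct acpi_processor_performance example_perf;
 *
 *	static int example_cpu_init(struct cpufreq_policy *policy)
 *	{
 *		int ret;
 *
 *		ret = acpi_processor_register_performance(&example_perf,
 *							   policy->cpu);
 *		if (ret)
 *			return ret;
 *
 *		acpi_processor_notify_smm(THIS_MODULE);
 *		return 0;
 *	}
 *
 *	static int example_cpu_exit(struct cpufreq_policy *policy)
 *	{
 *		acpi_processor_unregister_performance(&example_perf,
 *						      policy->cpu);
 *		return 0;
 *	}
 */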