/*
 * processor_perflib.c - ACPI Processor P-States Library ($Revision: 71 $)
 *
 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 * Copyright (C) 2004 Dominik Brodowski <linux@brodo.de>
 * Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *			- Added processor hotplug support
 *
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>

#ifdef CONFIG_X86
#include <asm/cpufeature.h>
#endif

#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
#include <acpi/processor.h>

#define PREFIX "ACPI: "

#define ACPI_PROCESSOR_CLASS		"processor"
#define ACPI_PROCESSOR_FILE_PERFORMANCE	"performance"
#define _COMPONENT		ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_perflib");

static DEFINE_MUTEX(performance_mutex);

/* Use cpufreq debug layer for _PPC changes. */
#define cpufreq_printk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_CORE, \
						"cpufreq-core", msg)

/*
 * _PPC support is implemented as a CPUfreq policy notifier:
 * each time a CPUfreq driver that is also registered with the ACPI
 * core is asked to change the speed policy, the maximum frequency is
 * adjusted so that it stays within the platform limit.
 *
 * Also, when a new platform limit value is detected, the CPUfreq
 * policy is adjusted accordingly.
 */

/*
 * ignore_ppc:
 * -1 -> cpufreq low level drivers not initialized -> _PSS, etc. not called yet
 *	 -> ignore _PPC
 *  0 -> cpufreq low level drivers initialized -> consider _PPC values
 *  1 -> ignore _PPC totally -> forced by user through boot param
 */
static int ignore_ppc = -1;
module_param(ignore_ppc, int, 0644);
MODULE_PARM_DESC(ignore_ppc, "If the frequency of your machine gets wrongly" \
		 " limited by BIOS, this should help");

#define PPC_REGISTERED   1
#define PPC_IN_USE       2

static int acpi_processor_ppc_status;
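
/*
 * acpi_processor_ppc_notifier - clamp a new cpufreq policy to the _PPC limit.
 *
 * Called for every cpufreq policy event; it only acts on
 * CPUFREQ_INCOMPATIBLE.  It looks up the CPU's cached
 * performance_platform_limit and, if that index refers to a valid P-state,
 * restricts the policy's maximum frequency to that state's core_frequency
 * (converted from MHz to kHz, which is what cpufreq expects).
 */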
static int acpi_processor_ppc_notifier(struct notifier_block *nb,
				       unsigned long event, void *data)
{
	struct cpufreq_policy *policy = data;
	struct acpi_processor *pr;
	unsigned int ppc = 0;

	if (event == CPUFREQ_START && ignore_ppc <= 0) {
		ignore_ppc = 0;
		return 0;
	}

	if (ignore_ppc)
		return 0;

	if (event != CPUFREQ_INCOMPATIBLE)
		return 0;

	mutex_lock(&performance_mutex);

	pr = per_cpu(processors, policy->cpu);
	if (!pr || !pr->performance)
		goto out;

	ppc = (unsigned int)pr->performance_platform_limit;

	if (ppc >= pr->performance->state_count)
		goto out;

	cpufreq_verify_within_limits(policy, 0,
				     pr->performance->states[ppc].
				     core_frequency * 1000);

out:
	mutex_unlock(&performance_mutex);

	return 0;
}

static struct notifier_block acpi_ppc_notifier_block = {
	.notifier_call = acpi_processor_ppc_notifier,
};

static int acpi_processor_get_platform_limit(struct acpi_processor *pr)
{
	acpi_status status = 0;
	unsigned long long ppc = 0;

	if (!pr)
		return -EINVAL;

	/*
	 * _PPC indicates the maximum state currently supported by the platform
	 * (e.g. 0 = states 0..n; 1 = states 1..n; etc.)
	 */
	status = acpi_evaluate_integer(pr->handle, "_PPC", NULL, &ppc);

	if (status != AE_NOT_FOUND)
		acpi_processor_ppc_status |= PPC_IN_USE;

	if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
		ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PPC"));
		return -ENODEV;
	}

	cpufreq_printk("CPU %d: _PPC is %d - frequency %s limited\n", pr->id,
		       (int)ppc, ppc ? "" : "not");

	pr->performance_platform_limit = (int)ppc;

	return 0;
}

int acpi_processor_ppc_has_changed(struct acpi_processor *pr)
{
	int ret;

	if (ignore_ppc)
		return 0;

	ret = acpi_processor_get_platform_limit(pr);

	if (ret < 0)
		return ret;
	else
		return cpufreq_update_policy(pr->id);
}
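
/*
 * acpi_processor_get_bios_limit - report the BIOS limit as a frequency.
 *
 * Translates the cached _PPC state index into the corresponding _PSS
 * core_frequency, in kHz, so callers can compare it against cpufreq
 * policy limits directly.
 */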
int acpi_processor_get_bios_limit(int cpu, unsigned int *limit)
{
	struct acpi_processor *pr;

	pr = per_cpu(processors, cpu);
	if (!pr || !pr->performance || !pr->performance->state_count)
		return -ENODEV;
	*limit = pr->performance->states[pr->performance_platform_limit].
		core_frequency * 1000;
	return 0;
}
EXPORT_SYMBOL(acpi_processor_get_bios_limit);

void acpi_processor_ppc_init(void)
{
	if (!cpufreq_register_notifier
	    (&acpi_ppc_notifier_block, CPUFREQ_POLICY_NOTIFIER))
		acpi_processor_ppc_status |= PPC_REGISTERED;
	else
		printk(KERN_DEBUG
		       "Warning: Processor Platform Limit not supported.\n");
}

void acpi_processor_ppc_exit(void)
{
	if (acpi_processor_ppc_status & PPC_REGISTERED)
		cpufreq_unregister_notifier(&acpi_ppc_notifier_block,
					    CPUFREQ_POLICY_NOTIFIER);

	acpi_processor_ppc_status &= ~PPC_REGISTERED;
}

static int acpi_processor_get_performance_control(struct acpi_processor *pr)
{
	int result = 0;
	acpi_status status = 0;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *pct = NULL;
	union acpi_object obj = { 0 };

	status = acpi_evaluate_object(pr->handle, "_PCT", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PCT"));
		return -ENODEV;
	}

	pct = (union acpi_object *)buffer.pointer;
	if (!pct || (pct->type != ACPI_TYPE_PACKAGE)
	    || (pct->package.count != 2)) {
		printk(KERN_ERR PREFIX "Invalid _PCT data\n");
		result = -EFAULT;
		goto end;
	}

	/*
	 * control_register
	 */

	obj = pct->package.elements[0];

	if ((obj.type != ACPI_TYPE_BUFFER)
	    || (obj.buffer.length < sizeof(struct acpi_pct_register))
	    || (obj.buffer.pointer == NULL)) {
		printk(KERN_ERR PREFIX "Invalid _PCT data (control_register)\n");
		result = -EFAULT;
		goto end;
	}
	memcpy(&pr->performance->control_register, obj.buffer.pointer,
	       sizeof(struct acpi_pct_register));

	/*
	 * status_register
	 */

	obj = pct->package.elements[1];

	if ((obj.type != ACPI_TYPE_BUFFER)
	    || (obj.buffer.length < sizeof(struct acpi_pct_register))
	    || (obj.buffer.pointer == NULL)) {
		printk(KERN_ERR PREFIX "Invalid _PCT data (status_register)\n");
		result = -EFAULT;
		goto end;
	}

	memcpy(&pr->performance->status_register, obj.buffer.pointer,
	       sizeof(struct acpi_pct_register));

end:
	kfree(buffer.pointer);

	return result;
}
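
/*
 * acpi_processor_get_performance_states - parse _PSS.
 *
 * _PSS returns a package of P-state entries, each itself a package of six
 * integers (hence the "NNNNNN" format string): core_frequency [MHz],
 * power [mW], transition_latency [us], bus_master_latency [us], and the
 * control and status values.  Each entry is extracted into one
 * struct acpi_processor_px in the states array allocated here.
 */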
static int acpi_processor_get_performance_states(struct acpi_processor *pr)
{
	int result = 0;
	acpi_status status = AE_OK;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	struct acpi_buffer format = { sizeof("NNNNNN"), "NNNNNN" };
	struct acpi_buffer state = { 0, NULL };
	union acpi_object *pss = NULL;
	int i;

	status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PSS"));
		return -ENODEV;
	}

	pss = buffer.pointer;
	if (!pss || (pss->type != ACPI_TYPE_PACKAGE)) {
		printk(KERN_ERR PREFIX "Invalid _PSS data\n");
		result = -EFAULT;
		goto end;
	}

	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d performance states\n",
			  pss->package.count));

	pr->performance->state_count = pss->package.count;
	pr->performance->states =
	    kmalloc(sizeof(struct acpi_processor_px) * pss->package.count,
		    GFP_KERNEL);
	if (!pr->performance->states) {
		result = -ENOMEM;
		goto end;
	}

	for (i = 0; i < pr->performance->state_count; i++) {

		struct acpi_processor_px *px = &(pr->performance->states[i]);

		state.length = sizeof(struct acpi_processor_px);
		state.pointer = px;

		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Extracting state %d\n", i));

		status = acpi_extract_package(&(pss->package.elements[i]),
					      &format, &state);
		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status, "Invalid _PSS data"));
			result = -EFAULT;
			kfree(pr->performance->states);
			goto end;
		}

		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "State [%d]: core_frequency[%d] power[%d] transition_latency[%d] bus_master_latency[%d] control[0x%x] status[0x%x]\n",
				  i,
				  (u32) px->core_frequency,
				  (u32) px->power,
				  (u32) px->transition_latency,
				  (u32) px->bus_master_latency,
				  (u32) px->control, (u32) px->status));

		/*
		 * Check that ACPI's u64 MHz will be valid as u32 KHz in cpufreq
		 */
		if (!px->core_frequency ||
		    ((u32)(px->core_frequency * 1000) !=
		     (px->core_frequency * 1000))) {
			printk(KERN_ERR FW_BUG PREFIX
			       "Invalid BIOS _PSS frequency: 0x%llx MHz\n",
			       px->core_frequency);
			result = -EFAULT;
			kfree(pr->performance->states);
			goto end;
		}
	}

end:
	kfree(buffer.pointer);

	return result;
}

static int acpi_processor_get_performance_info(struct acpi_processor *pr)
{
	int result = 0;
	acpi_status status = AE_OK;
	acpi_handle handle = NULL;

	if (!pr || !pr->performance || !pr->handle)
		return -EINVAL;

	status = acpi_get_handle(pr->handle, "_PCT", &handle);
	if (ACPI_FAILURE(status)) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "ACPI-based processor performance control unavailable\n"));
		return -ENODEV;
	}

	result = acpi_processor_get_performance_control(pr);
	if (result)
		goto update_bios;

	result = acpi_processor_get_performance_states(pr);
	if (result)
		goto update_bios;

	return 0;

	/*
	 * Having _PPC but missing frequencies (_PSS, _PCT) is a very good
	 * hint that the BIOS is older than the CPU and does not know its
	 * frequencies.
	 */
update_bios:
#ifdef CONFIG_X86
	if (ACPI_SUCCESS(acpi_get_handle(pr->handle, "_PPC", &handle))) {
		if (boot_cpu_has(X86_FEATURE_EST))
			printk(KERN_WARNING FW_BUG "BIOS needs update for CPU "
			       "frequency support\n");
	}
#endif
	return result;
}
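
/*
 * acpi_processor_notify_smm - tell the BIOS we are taking over P-state
 * control.
 *
 * Writes the FADT pstate_control command to the FADT smi_command port,
 * which hands P-state control from SMM firmware to the OS.  The outcome is
 * cached in is_done so the SMI is triggered at most once; a module
 * reference is kept on behalf of the calling cpufreq driver for as long as
 * _PPC is in use.
 */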
int acpi_processor_notify_smm(struct module *calling_module)
{
	acpi_status status;
	static int is_done = 0;

	if (!(acpi_processor_ppc_status & PPC_REGISTERED))
		return -EBUSY;

	if (!try_module_get(calling_module))
		return -EINVAL;

	/* is_done is set to negative if an error occurred,
	 * and to positive if _no_ error occurred, but SMM
	 * was already notified. This avoids double notification
	 * which might lead to unexpected results...
	 */
	if (is_done > 0) {
		module_put(calling_module);
		return 0;
	} else if (is_done < 0) {
		module_put(calling_module);
		return is_done;
	}

	is_done = -EIO;

	/* Can't write pstate_control to smi_command if either value is zero */
	if ((!acpi_gbl_FADT.smi_command) || (!acpi_gbl_FADT.pstate_control)) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No SMI port or pstate_control\n"));
		module_put(calling_module);
		return 0;
	}

	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			  "Writing pstate_control [0x%x] to smi_command [0x%x]\n",
			  acpi_gbl_FADT.pstate_control, acpi_gbl_FADT.smi_command));

	status = acpi_os_write_port(acpi_gbl_FADT.smi_command,
				    (u32) acpi_gbl_FADT.pstate_control, 8);
	if (ACPI_FAILURE(status)) {
		ACPI_EXCEPTION((AE_INFO, status,
				"Failed to write pstate_control [0x%x] to "
				"smi_command [0x%x]", acpi_gbl_FADT.pstate_control,
				acpi_gbl_FADT.smi_command));
		module_put(calling_module);
		return status;
	}

	/* Success. If there's no _PPC, we need to fear nothing, so
	 * we can allow the cpufreq driver to be rmmod'ed. */
	is_done = 1;

	if (!(acpi_processor_ppc_status & PPC_IN_USE))
		module_put(calling_module);

	return 0;
}

EXPORT_SYMBOL(acpi_processor_notify_smm);

static int acpi_processor_get_psd(struct acpi_processor *pr)
{
	int result = 0;
	acpi_status status = AE_OK;
	struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
	struct acpi_buffer format = {sizeof("NNNNN"), "NNNNN"};
	struct acpi_buffer state = {0, NULL};
	union acpi_object *psd = NULL;
	struct acpi_psd_package *pdomain;

	status = acpi_evaluate_object(pr->handle, "_PSD", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		return -ENODEV;
	}

	psd = buffer.pointer;
	if (!psd || (psd->type != ACPI_TYPE_PACKAGE)) {
		printk(KERN_ERR PREFIX "Invalid _PSD data\n");
		result = -EFAULT;
		goto end;
	}

	if (psd->package.count != 1) {
		printk(KERN_ERR PREFIX "Invalid _PSD data\n");
		result = -EFAULT;
		goto end;
	}

	pdomain = &(pr->performance->domain_info);

	state.length = sizeof(struct acpi_psd_package);
	state.pointer = pdomain;

	status = acpi_extract_package(&(psd->package.elements[0]),
				      &format, &state);
	if (ACPI_FAILURE(status)) {
		printk(KERN_ERR PREFIX "Invalid _PSD data\n");
		result = -EFAULT;
		goto end;
	}

	if (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES) {
		printk(KERN_ERR PREFIX "Unknown _PSD:num_entries\n");
		result = -EFAULT;
		goto end;
	}

	if (pdomain->revision != ACPI_PSD_REV0_REVISION) {
		printk(KERN_ERR PREFIX "Unknown _PSD:revision\n");
		result = -EFAULT;
		goto end;
	}

	if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
	    pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
	    pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
		printk(KERN_ERR PREFIX "Invalid _PSD:coord_type\n");
		result = -EFAULT;
		goto end;
	}
end:
	kfree(buffer.pointer);
	return result;
}
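
/*
 * acpi_processor_preregister_performance - set up P-state coordination.
 *
 * Evaluates _PSD for every present processor, then cross-checks the
 * resulting coordination domains: every CPU in a domain must agree on the
 * number of processors and the coordination type.  On success, each CPU's
 * shared_cpu_map and shared_type describe its domain; on any inconsistency,
 * every CPU falls back to a one-CPU SW_ALL domain.
 */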
int acpi_processor_preregister_performance(
		struct acpi_processor_performance *performance)
{
	int count, count_target;
	int retval = 0;
	unsigned int i, j;
	cpumask_var_t covered_cpus;
	struct acpi_processor *pr;
	struct acpi_psd_package *pdomain;
	struct acpi_processor *match_pr;
	struct acpi_psd_package *match_pdomain;

	if (!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))
		return -ENOMEM;

	mutex_lock(&performance_mutex);

	/*
	 * Check if another driver has already registered, and abort before
	 * changing pr->performance if it has. Check input data as well.
	 */
	for_each_possible_cpu(i) {
		pr = per_cpu(processors, i);
		if (!pr) {
			/* Look only at processors in ACPI namespace */
			continue;
		}

		if (pr->performance) {
			retval = -EBUSY;
			goto err_out;
		}

		if (!performance || !per_cpu_ptr(performance, i)) {
			retval = -EINVAL;
			goto err_out;
		}
	}

	/* Call _PSD for all CPUs */
	for_each_possible_cpu(i) {
		pr = per_cpu(processors, i);
		if (!pr)
			continue;

		pr->performance = per_cpu_ptr(performance, i);
		cpumask_set_cpu(i, pr->performance->shared_cpu_map);
		if (acpi_processor_get_psd(pr)) {
			retval = -EINVAL;
			continue;
		}
	}
	if (retval)
		goto err_ret;

	/*
	 * Now that we have _PSD data from all CPUs, let's set up the
	 * P-state domain info.
	 */
	for_each_possible_cpu(i) {
		pr = per_cpu(processors, i);
		if (!pr)
			continue;

		if (cpumask_test_cpu(i, covered_cpus))
			continue;

		pdomain = &(pr->performance->domain_info);
		cpumask_set_cpu(i, pr->performance->shared_cpu_map);
		cpumask_set_cpu(i, covered_cpus);
		if (pdomain->num_processors <= 1)
			continue;

		/* Validate the Domain info */
		count_target = pdomain->num_processors;
		count = 1;
		if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL)
			pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ALL;
		else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL)
			pr->performance->shared_type = CPUFREQ_SHARED_TYPE_HW;
		else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY)
			pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ANY;

		for_each_possible_cpu(j) {
			if (i == j)
				continue;

			match_pr = per_cpu(processors, j);
			if (!match_pr)
				continue;

			match_pdomain = &(match_pr->performance->domain_info);
			if (match_pdomain->domain != pdomain->domain)
				continue;

			/* Here i and j are in the same domain */

			if (match_pdomain->num_processors != count_target) {
				retval = -EINVAL;
				goto err_ret;
			}

			if (pdomain->coord_type != match_pdomain->coord_type) {
				retval = -EINVAL;
				goto err_ret;
			}

			cpumask_set_cpu(j, covered_cpus);
			cpumask_set_cpu(j, pr->performance->shared_cpu_map);
			count++;
		}

		for_each_possible_cpu(j) {
			if (i == j)
				continue;

			match_pr = per_cpu(processors, j);
			if (!match_pr)
				continue;

			match_pdomain = &(match_pr->performance->domain_info);
			if (match_pdomain->domain != pdomain->domain)
				continue;

			match_pr->performance->shared_type =
					pr->performance->shared_type;
			cpumask_copy(match_pr->performance->shared_cpu_map,
				     pr->performance->shared_cpu_map);
		}
	}

err_ret:
	for_each_possible_cpu(i) {
		pr = per_cpu(processors, i);
		if (!pr || !pr->performance)
			continue;

		/* Assume no coordination on any error parsing domain info */
		if (retval) {
			cpumask_clear(pr->performance->shared_cpu_map);
			cpumask_set_cpu(i, pr->performance->shared_cpu_map);
			pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ALL;
		}
		pr->performance = NULL; /* Will be set for real in register */
	}

err_out:
	mutex_unlock(&performance_mutex);
	free_cpumask_var(covered_cpus);
	return retval;
}
EXPORT_SYMBOL(acpi_processor_preregister_performance);
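
/*
 * acpi_processor_register_performance - attach a cpufreq driver's per-CPU
 * performance structure and fill it from _PCT/_PSS.
 *
 * A cpufreq driver (acpi-cpufreq, for instance) typically calls
 * acpi_processor_preregister_performance() once, then this function for
 * each CPU, then acpi_processor_notify_smm(); teardown goes through
 * acpi_processor_unregister_performance().
 */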
int
acpi_processor_register_performance(struct acpi_processor_performance
				    *performance, unsigned int cpu)
{
	struct acpi_processor *pr;

	if (!(acpi_processor_ppc_status & PPC_REGISTERED))
		return -EINVAL;

	mutex_lock(&performance_mutex);

	pr = per_cpu(processors, cpu);
	if (!pr) {
		mutex_unlock(&performance_mutex);
		return -ENODEV;
	}

	if (pr->performance) {
		mutex_unlock(&performance_mutex);
		return -EBUSY;
	}

	WARN_ON(!performance);

	pr->performance = performance;

	if (acpi_processor_get_performance_info(pr)) {
		pr->performance = NULL;
		mutex_unlock(&performance_mutex);
		return -EIO;
	}

	mutex_unlock(&performance_mutex);
	return 0;
}

EXPORT_SYMBOL(acpi_processor_register_performance);

void
acpi_processor_unregister_performance(struct acpi_processor_performance
				      *performance, unsigned int cpu)
{
	struct acpi_processor *pr;

	mutex_lock(&performance_mutex);

	pr = per_cpu(processors, cpu);
	if (!pr) {
		mutex_unlock(&performance_mutex);
		return;
	}

	if (pr->performance)
		kfree(pr->performance->states);
	pr->performance = NULL;

	mutex_unlock(&performance_mutex);

	return;
}

EXPORT_SYMBOL(acpi_processor_unregister_performance);