1 /* 2 * processor_perflib.c - ACPI Processor P-States Library ($Revision: 71 $) 3 * 4 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com> 5 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> 6 * Copyright (C) 2004 Dominik Brodowski <linux@brodo.de> 7 * Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> 8 * - Added processor hotplug support 9 * 10 * 11 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 12 * 13 * This program is free software; you can redistribute it and/or modify 14 * it under the terms of the GNU General Public License as published by 15 * the Free Software Foundation; either version 2 of the License, or (at 16 * your option) any later version. 17 * 18 * This program is distributed in the hope that it will be useful, but 19 * WITHOUT ANY WARRANTY; without even the implied warranty of 20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 21 * General Public License for more details. 22 * 23 * You should have received a copy of the GNU General Public License along 24 * with this program; if not, write to the Free Software Foundation, Inc., 25 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 26 * 27 */ 28 29 #include <linux/kernel.h> 30 #include <linux/module.h> 31 #include <linux/init.h> 32 #include <linux/cpufreq.h> 33 #include <linux/slab.h> 34 #include <linux/acpi.h> 35 #include <acpi/processor.h> 36 #ifdef CONFIG_X86 37 #include <asm/cpufeature.h> 38 #endif 39 40 #define PREFIX "ACPI: " 41 42 #define ACPI_PROCESSOR_CLASS "processor" 43 #define ACPI_PROCESSOR_FILE_PERFORMANCE "performance" 44 #define _COMPONENT ACPI_PROCESSOR_COMPONENT 45 ACPI_MODULE_NAME("processor_perflib"); 46 47 static DEFINE_MUTEX(performance_mutex); 48 49 /* 50 * _PPC support is implemented as a CPUfreq policy notifier: 51 * This means each time a CPUfreq driver registered also with 52 * the ACPI core is asked to change the speed policy, the maximum 53 * value is adjusted so that it is within the platform limit. 54 * 55 * Also, when a new platform limit value is detected, the CPUfreq 56 * policy is adjusted accordingly. 57 */ 58 59 /* ignore_ppc: 60 * -1 -> cpufreq low level drivers not initialized -> _PSS, etc. not called yet 61 * ignore _PPC 62 * 0 -> cpufreq low level drivers initialized -> consider _PPC values 63 * 1 -> ignore _PPC totally -> forced by user through boot param 64 */ 65 static int ignore_ppc = -1; 66 module_param(ignore_ppc, int, 0644); 67 MODULE_PARM_DESC(ignore_ppc, "If the frequency of your machine gets wrongly" \ 68 "limited by BIOS, this should help"); 69 70 #define PPC_REGISTERED 1 71 #define PPC_IN_USE 2 72 73 static int acpi_processor_ppc_status; 74 75 static int acpi_processor_ppc_notifier(struct notifier_block *nb, 76 unsigned long event, void *data) 77 { 78 struct cpufreq_policy *policy = data; 79 struct acpi_processor *pr; 80 unsigned int ppc = 0; 81 82 if (event == CPUFREQ_START && ignore_ppc <= 0) { 83 ignore_ppc = 0; 84 return 0; 85 } 86 87 if (ignore_ppc) 88 return 0; 89 90 if (event != CPUFREQ_INCOMPATIBLE) 91 return 0; 92 93 mutex_lock(&performance_mutex); 94 95 pr = per_cpu(processors, policy->cpu); 96 if (!pr || !pr->performance) 97 goto out; 98 99 ppc = (unsigned int)pr->performance_platform_limit; 100 101 if (ppc >= pr->performance->state_count) 102 goto out; 103 104 cpufreq_verify_within_limits(policy, 0, 105 pr->performance->states[ppc]. 106 core_frequency * 1000); 107 108 out: 109 mutex_unlock(&performance_mutex); 110 111 return 0; 112 } 113 114 static struct notifier_block acpi_ppc_notifier_block = { 115 .notifier_call = acpi_processor_ppc_notifier, 116 }; 117 118 static int acpi_processor_get_platform_limit(struct acpi_processor *pr) 119 { 120 acpi_status status = 0; 121 unsigned long long ppc = 0; 122 123 124 if (!pr) 125 return -EINVAL; 126 127 /* 128 * _PPC indicates the maximum state currently supported by the platform 129 * (e.g. 0 = states 0..n; 1 = states 1..n; etc. 130 */ 131 status = acpi_evaluate_integer(pr->handle, "_PPC", NULL, &ppc); 132 133 if (status != AE_NOT_FOUND) 134 acpi_processor_ppc_status |= PPC_IN_USE; 135 136 if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) { 137 ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PPC")); 138 return -ENODEV; 139 } 140 141 pr_debug("CPU %d: _PPC is %d - frequency %s limited\n", pr->id, 142 (int)ppc, ppc ? "" : "not"); 143 144 pr->performance_platform_limit = (int)ppc; 145 146 return 0; 147 } 148 149 #define ACPI_PROCESSOR_NOTIFY_PERFORMANCE 0x80 150 /* 151 * acpi_processor_ppc_ost: Notify firmware the _PPC evaluation status 152 * @handle: ACPI processor handle 153 * @status: the status code of _PPC evaluation 154 * 0: success. OSPM is now using the performance state specificed. 155 * 1: failure. OSPM has not changed the number of P-states in use 156 */ 157 static void acpi_processor_ppc_ost(acpi_handle handle, int status) 158 { 159 if (acpi_has_method(handle, "_OST")) 160 acpi_evaluate_ost(handle, ACPI_PROCESSOR_NOTIFY_PERFORMANCE, 161 status, NULL); 162 } 163 164 int acpi_processor_ppc_has_changed(struct acpi_processor *pr, int event_flag) 165 { 166 int ret; 167 168 if (ignore_ppc) { 169 /* 170 * Only when it is notification event, the _OST object 171 * will be evaluated. Otherwise it is skipped. 172 */ 173 if (event_flag) 174 acpi_processor_ppc_ost(pr->handle, 1); 175 return 0; 176 } 177 178 ret = acpi_processor_get_platform_limit(pr); 179 /* 180 * Only when it is notification event, the _OST object 181 * will be evaluated. Otherwise it is skipped. 182 */ 183 if (event_flag) { 184 if (ret < 0) 185 acpi_processor_ppc_ost(pr->handle, 1); 186 else 187 acpi_processor_ppc_ost(pr->handle, 0); 188 } 189 if (ret < 0) 190 return (ret); 191 else 192 return cpufreq_update_policy(pr->id); 193 } 194 195 int acpi_processor_get_bios_limit(int cpu, unsigned int *limit) 196 { 197 struct acpi_processor *pr; 198 199 pr = per_cpu(processors, cpu); 200 if (!pr || !pr->performance || !pr->performance->state_count) 201 return -ENODEV; 202 *limit = pr->performance->states[pr->performance_platform_limit]. 203 core_frequency * 1000; 204 return 0; 205 } 206 EXPORT_SYMBOL(acpi_processor_get_bios_limit); 207 208 void acpi_processor_ppc_init(void) 209 { 210 if (!cpufreq_register_notifier 211 (&acpi_ppc_notifier_block, CPUFREQ_POLICY_NOTIFIER)) 212 acpi_processor_ppc_status |= PPC_REGISTERED; 213 else 214 printk(KERN_DEBUG 215 "Warning: Processor Platform Limit not supported.\n"); 216 } 217 218 void acpi_processor_ppc_exit(void) 219 { 220 if (acpi_processor_ppc_status & PPC_REGISTERED) 221 cpufreq_unregister_notifier(&acpi_ppc_notifier_block, 222 CPUFREQ_POLICY_NOTIFIER); 223 224 acpi_processor_ppc_status &= ~PPC_REGISTERED; 225 } 226 227 static int acpi_processor_get_performance_control(struct acpi_processor *pr) 228 { 229 int result = 0; 230 acpi_status status = 0; 231 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; 232 union acpi_object *pct = NULL; 233 union acpi_object obj = { 0 }; 234 235 236 status = acpi_evaluate_object(pr->handle, "_PCT", NULL, &buffer); 237 if (ACPI_FAILURE(status)) { 238 ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PCT")); 239 return -ENODEV; 240 } 241 242 pct = (union acpi_object *)buffer.pointer; 243 if (!pct || (pct->type != ACPI_TYPE_PACKAGE) 244 || (pct->package.count != 2)) { 245 printk(KERN_ERR PREFIX "Invalid _PCT data\n"); 246 result = -EFAULT; 247 goto end; 248 } 249 250 /* 251 * control_register 252 */ 253 254 obj = pct->package.elements[0]; 255 256 if ((obj.type != ACPI_TYPE_BUFFER) 257 || (obj.buffer.length < sizeof(struct acpi_pct_register)) 258 || (obj.buffer.pointer == NULL)) { 259 printk(KERN_ERR PREFIX "Invalid _PCT data (control_register)\n"); 260 result = -EFAULT; 261 goto end; 262 } 263 memcpy(&pr->performance->control_register, obj.buffer.pointer, 264 sizeof(struct acpi_pct_register)); 265 266 /* 267 * status_register 268 */ 269 270 obj = pct->package.elements[1]; 271 272 if ((obj.type != ACPI_TYPE_BUFFER) 273 || (obj.buffer.length < sizeof(struct acpi_pct_register)) 274 || (obj.buffer.pointer == NULL)) { 275 printk(KERN_ERR PREFIX "Invalid _PCT data (status_register)\n"); 276 result = -EFAULT; 277 goto end; 278 } 279 280 memcpy(&pr->performance->status_register, obj.buffer.pointer, 281 sizeof(struct acpi_pct_register)); 282 283 end: 284 kfree(buffer.pointer); 285 286 return result; 287 } 288 289 #ifdef CONFIG_X86 290 /* 291 * Some AMDs have 50MHz frequency multiples, but only provide 100MHz rounding 292 * in their ACPI data. Calculate the real values and fix up the _PSS data. 293 */ 294 static void amd_fixup_frequency(struct acpi_processor_px *px, int i) 295 { 296 u32 hi, lo, fid, did; 297 int index = px->control & 0x00000007; 298 299 if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) 300 return; 301 302 if ((boot_cpu_data.x86 == 0x10 && boot_cpu_data.x86_model < 10) 303 || boot_cpu_data.x86 == 0x11) { 304 rdmsr(MSR_AMD_PSTATE_DEF_BASE + index, lo, hi); 305 /* 306 * MSR C001_0064+: 307 * Bit 63: PstateEn. Read-write. If set, the P-state is valid. 308 */ 309 if (!(hi & BIT(31))) 310 return; 311 312 fid = lo & 0x3f; 313 did = (lo >> 6) & 7; 314 if (boot_cpu_data.x86 == 0x10) 315 px->core_frequency = (100 * (fid + 0x10)) >> did; 316 else 317 px->core_frequency = (100 * (fid + 8)) >> did; 318 } 319 } 320 #else 321 static void amd_fixup_frequency(struct acpi_processor_px *px, int i) {}; 322 #endif 323 324 static int acpi_processor_get_performance_states(struct acpi_processor *pr) 325 { 326 int result = 0; 327 acpi_status status = AE_OK; 328 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; 329 struct acpi_buffer format = { sizeof("NNNNNN"), "NNNNNN" }; 330 struct acpi_buffer state = { 0, NULL }; 331 union acpi_object *pss = NULL; 332 int i; 333 int last_invalid = -1; 334 335 336 status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer); 337 if (ACPI_FAILURE(status)) { 338 ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PSS")); 339 return -ENODEV; 340 } 341 342 pss = buffer.pointer; 343 if (!pss || (pss->type != ACPI_TYPE_PACKAGE)) { 344 printk(KERN_ERR PREFIX "Invalid _PSS data\n"); 345 result = -EFAULT; 346 goto end; 347 } 348 349 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d performance states\n", 350 pss->package.count)); 351 352 pr->performance->state_count = pss->package.count; 353 pr->performance->states = 354 kmalloc(sizeof(struct acpi_processor_px) * pss->package.count, 355 GFP_KERNEL); 356 if (!pr->performance->states) { 357 result = -ENOMEM; 358 goto end; 359 } 360 361 for (i = 0; i < pr->performance->state_count; i++) { 362 363 struct acpi_processor_px *px = &(pr->performance->states[i]); 364 365 state.length = sizeof(struct acpi_processor_px); 366 state.pointer = px; 367 368 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Extracting state %d\n", i)); 369 370 status = acpi_extract_package(&(pss->package.elements[i]), 371 &format, &state); 372 if (ACPI_FAILURE(status)) { 373 ACPI_EXCEPTION((AE_INFO, status, "Invalid _PSS data")); 374 result = -EFAULT; 375 kfree(pr->performance->states); 376 goto end; 377 } 378 379 amd_fixup_frequency(px, i); 380 381 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 382 "State [%d]: core_frequency[%d] power[%d] transition_latency[%d] bus_master_latency[%d] control[0x%x] status[0x%x]\n", 383 i, 384 (u32) px->core_frequency, 385 (u32) px->power, 386 (u32) px->transition_latency, 387 (u32) px->bus_master_latency, 388 (u32) px->control, (u32) px->status)); 389 390 /* 391 * Check that ACPI's u64 MHz will be valid as u32 KHz in cpufreq 392 */ 393 if (!px->core_frequency || 394 ((u32)(px->core_frequency * 1000) != 395 (px->core_frequency * 1000))) { 396 printk(KERN_ERR FW_BUG PREFIX 397 "Invalid BIOS _PSS frequency found for processor %d: 0x%llx MHz\n", 398 pr->id, px->core_frequency); 399 if (last_invalid == -1) 400 last_invalid = i; 401 } else { 402 if (last_invalid != -1) { 403 /* 404 * Copy this valid entry over last_invalid entry 405 */ 406 memcpy(&(pr->performance->states[last_invalid]), 407 px, sizeof(struct acpi_processor_px)); 408 ++last_invalid; 409 } 410 } 411 } 412 413 if (last_invalid == 0) { 414 printk(KERN_ERR FW_BUG PREFIX 415 "No valid BIOS _PSS frequency found for processor %d\n", pr->id); 416 result = -EFAULT; 417 kfree(pr->performance->states); 418 pr->performance->states = NULL; 419 } 420 421 if (last_invalid > 0) 422 pr->performance->state_count = last_invalid; 423 424 end: 425 kfree(buffer.pointer); 426 427 return result; 428 } 429 430 int acpi_processor_get_performance_info(struct acpi_processor *pr) 431 { 432 int result = 0; 433 434 if (!pr || !pr->performance || !pr->handle) 435 return -EINVAL; 436 437 if (!acpi_has_method(pr->handle, "_PCT")) { 438 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 439 "ACPI-based processor performance control unavailable\n")); 440 return -ENODEV; 441 } 442 443 result = acpi_processor_get_performance_control(pr); 444 if (result) 445 goto update_bios; 446 447 result = acpi_processor_get_performance_states(pr); 448 if (result) 449 goto update_bios; 450 451 /* We need to call _PPC once when cpufreq starts */ 452 if (ignore_ppc != 1) 453 result = acpi_processor_get_platform_limit(pr); 454 455 return result; 456 457 /* 458 * Having _PPC but missing frequencies (_PSS, _PCT) is a very good hint that 459 * the BIOS is older than the CPU and does not know its frequencies 460 */ 461 update_bios: 462 #ifdef CONFIG_X86 463 if (acpi_has_method(pr->handle, "_PPC")) { 464 if(boot_cpu_has(X86_FEATURE_EST)) 465 printk(KERN_WARNING FW_BUG "BIOS needs update for CPU " 466 "frequency support\n"); 467 } 468 #endif 469 return result; 470 } 471 EXPORT_SYMBOL_GPL(acpi_processor_get_performance_info); 472 int acpi_processor_notify_smm(struct module *calling_module) 473 { 474 acpi_status status; 475 static int is_done = 0; 476 477 478 if (!(acpi_processor_ppc_status & PPC_REGISTERED)) 479 return -EBUSY; 480 481 if (!try_module_get(calling_module)) 482 return -EINVAL; 483 484 /* is_done is set to negative if an error occurred, 485 * and to postitive if _no_ error occurred, but SMM 486 * was already notified. This avoids double notification 487 * which might lead to unexpected results... 488 */ 489 if (is_done > 0) { 490 module_put(calling_module); 491 return 0; 492 } else if (is_done < 0) { 493 module_put(calling_module); 494 return is_done; 495 } 496 497 is_done = -EIO; 498 499 /* Can't write pstate_control to smi_command if either value is zero */ 500 if ((!acpi_gbl_FADT.smi_command) || (!acpi_gbl_FADT.pstate_control)) { 501 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No SMI port or pstate_control\n")); 502 module_put(calling_module); 503 return 0; 504 } 505 506 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 507 "Writing pstate_control [0x%x] to smi_command [0x%x]\n", 508 acpi_gbl_FADT.pstate_control, acpi_gbl_FADT.smi_command)); 509 510 status = acpi_os_write_port(acpi_gbl_FADT.smi_command, 511 (u32) acpi_gbl_FADT.pstate_control, 8); 512 if (ACPI_FAILURE(status)) { 513 ACPI_EXCEPTION((AE_INFO, status, 514 "Failed to write pstate_control [0x%x] to " 515 "smi_command [0x%x]", acpi_gbl_FADT.pstate_control, 516 acpi_gbl_FADT.smi_command)); 517 module_put(calling_module); 518 return status; 519 } 520 521 /* Success. If there's no _PPC, we need to fear nothing, so 522 * we can allow the cpufreq driver to be rmmod'ed. */ 523 is_done = 1; 524 525 if (!(acpi_processor_ppc_status & PPC_IN_USE)) 526 module_put(calling_module); 527 528 return 0; 529 } 530 531 EXPORT_SYMBOL(acpi_processor_notify_smm); 532 533 static int acpi_processor_get_psd(struct acpi_processor *pr) 534 { 535 int result = 0; 536 acpi_status status = AE_OK; 537 struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL}; 538 struct acpi_buffer format = {sizeof("NNNNN"), "NNNNN"}; 539 struct acpi_buffer state = {0, NULL}; 540 union acpi_object *psd = NULL; 541 struct acpi_psd_package *pdomain; 542 543 status = acpi_evaluate_object(pr->handle, "_PSD", NULL, &buffer); 544 if (ACPI_FAILURE(status)) { 545 return -ENODEV; 546 } 547 548 psd = buffer.pointer; 549 if (!psd || (psd->type != ACPI_TYPE_PACKAGE)) { 550 printk(KERN_ERR PREFIX "Invalid _PSD data\n"); 551 result = -EFAULT; 552 goto end; 553 } 554 555 if (psd->package.count != 1) { 556 printk(KERN_ERR PREFIX "Invalid _PSD data\n"); 557 result = -EFAULT; 558 goto end; 559 } 560 561 pdomain = &(pr->performance->domain_info); 562 563 state.length = sizeof(struct acpi_psd_package); 564 state.pointer = pdomain; 565 566 status = acpi_extract_package(&(psd->package.elements[0]), 567 &format, &state); 568 if (ACPI_FAILURE(status)) { 569 printk(KERN_ERR PREFIX "Invalid _PSD data\n"); 570 result = -EFAULT; 571 goto end; 572 } 573 574 if (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES) { 575 printk(KERN_ERR PREFIX "Unknown _PSD:num_entries\n"); 576 result = -EFAULT; 577 goto end; 578 } 579 580 if (pdomain->revision != ACPI_PSD_REV0_REVISION) { 581 printk(KERN_ERR PREFIX "Unknown _PSD:revision\n"); 582 result = -EFAULT; 583 goto end; 584 } 585 586 if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL && 587 pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY && 588 pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) { 589 printk(KERN_ERR PREFIX "Invalid _PSD:coord_type\n"); 590 result = -EFAULT; 591 goto end; 592 } 593 end: 594 kfree(buffer.pointer); 595 return result; 596 } 597 598 int acpi_processor_preregister_performance( 599 struct acpi_processor_performance __percpu *performance) 600 { 601 int count_target; 602 int retval = 0; 603 unsigned int i, j; 604 cpumask_var_t covered_cpus; 605 struct acpi_processor *pr; 606 struct acpi_psd_package *pdomain; 607 struct acpi_processor *match_pr; 608 struct acpi_psd_package *match_pdomain; 609 610 if (!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL)) 611 return -ENOMEM; 612 613 mutex_lock(&performance_mutex); 614 615 /* 616 * Check if another driver has already registered, and abort before 617 * changing pr->performance if it has. Check input data as well. 618 */ 619 for_each_possible_cpu(i) { 620 pr = per_cpu(processors, i); 621 if (!pr) { 622 /* Look only at processors in ACPI namespace */ 623 continue; 624 } 625 626 if (pr->performance) { 627 retval = -EBUSY; 628 goto err_out; 629 } 630 631 if (!performance || !per_cpu_ptr(performance, i)) { 632 retval = -EINVAL; 633 goto err_out; 634 } 635 } 636 637 /* Call _PSD for all CPUs */ 638 for_each_possible_cpu(i) { 639 pr = per_cpu(processors, i); 640 if (!pr) 641 continue; 642 643 pr->performance = per_cpu_ptr(performance, i); 644 cpumask_set_cpu(i, pr->performance->shared_cpu_map); 645 if (acpi_processor_get_psd(pr)) { 646 retval = -EINVAL; 647 continue; 648 } 649 } 650 if (retval) 651 goto err_ret; 652 653 /* 654 * Now that we have _PSD data from all CPUs, lets setup P-state 655 * domain info. 656 */ 657 for_each_possible_cpu(i) { 658 pr = per_cpu(processors, i); 659 if (!pr) 660 continue; 661 662 if (cpumask_test_cpu(i, covered_cpus)) 663 continue; 664 665 pdomain = &(pr->performance->domain_info); 666 cpumask_set_cpu(i, pr->performance->shared_cpu_map); 667 cpumask_set_cpu(i, covered_cpus); 668 if (pdomain->num_processors <= 1) 669 continue; 670 671 /* Validate the Domain info */ 672 count_target = pdomain->num_processors; 673 if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL) 674 pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ALL; 675 else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL) 676 pr->performance->shared_type = CPUFREQ_SHARED_TYPE_HW; 677 else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY) 678 pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ANY; 679 680 for_each_possible_cpu(j) { 681 if (i == j) 682 continue; 683 684 match_pr = per_cpu(processors, j); 685 if (!match_pr) 686 continue; 687 688 match_pdomain = &(match_pr->performance->domain_info); 689 if (match_pdomain->domain != pdomain->domain) 690 continue; 691 692 /* Here i and j are in the same domain */ 693 694 if (match_pdomain->num_processors != count_target) { 695 retval = -EINVAL; 696 goto err_ret; 697 } 698 699 if (pdomain->coord_type != match_pdomain->coord_type) { 700 retval = -EINVAL; 701 goto err_ret; 702 } 703 704 cpumask_set_cpu(j, covered_cpus); 705 cpumask_set_cpu(j, pr->performance->shared_cpu_map); 706 } 707 708 for_each_possible_cpu(j) { 709 if (i == j) 710 continue; 711 712 match_pr = per_cpu(processors, j); 713 if (!match_pr) 714 continue; 715 716 match_pdomain = &(match_pr->performance->domain_info); 717 if (match_pdomain->domain != pdomain->domain) 718 continue; 719 720 match_pr->performance->shared_type = 721 pr->performance->shared_type; 722 cpumask_copy(match_pr->performance->shared_cpu_map, 723 pr->performance->shared_cpu_map); 724 } 725 } 726 727 err_ret: 728 for_each_possible_cpu(i) { 729 pr = per_cpu(processors, i); 730 if (!pr || !pr->performance) 731 continue; 732 733 /* Assume no coordination on any error parsing domain info */ 734 if (retval) { 735 cpumask_clear(pr->performance->shared_cpu_map); 736 cpumask_set_cpu(i, pr->performance->shared_cpu_map); 737 pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ALL; 738 } 739 pr->performance = NULL; /* Will be set for real in register */ 740 } 741 742 err_out: 743 mutex_unlock(&performance_mutex); 744 free_cpumask_var(covered_cpus); 745 return retval; 746 } 747 EXPORT_SYMBOL(acpi_processor_preregister_performance); 748 749 int 750 acpi_processor_register_performance(struct acpi_processor_performance 751 *performance, unsigned int cpu) 752 { 753 struct acpi_processor *pr; 754 755 if (!(acpi_processor_ppc_status & PPC_REGISTERED)) 756 return -EINVAL; 757 758 mutex_lock(&performance_mutex); 759 760 pr = per_cpu(processors, cpu); 761 if (!pr) { 762 mutex_unlock(&performance_mutex); 763 return -ENODEV; 764 } 765 766 if (pr->performance) { 767 mutex_unlock(&performance_mutex); 768 return -EBUSY; 769 } 770 771 WARN_ON(!performance); 772 773 pr->performance = performance; 774 775 if (acpi_processor_get_performance_info(pr)) { 776 pr->performance = NULL; 777 mutex_unlock(&performance_mutex); 778 return -EIO; 779 } 780 781 mutex_unlock(&performance_mutex); 782 return 0; 783 } 784 785 EXPORT_SYMBOL(acpi_processor_register_performance); 786 787 void 788 acpi_processor_unregister_performance(struct acpi_processor_performance 789 *performance, unsigned int cpu) 790 { 791 struct acpi_processor *pr; 792 793 mutex_lock(&performance_mutex); 794 795 pr = per_cpu(processors, cpu); 796 if (!pr) { 797 mutex_unlock(&performance_mutex); 798 return; 799 } 800 801 if (pr->performance) 802 kfree(pr->performance->states); 803 pr->performance = NULL; 804 805 mutex_unlock(&performance_mutex); 806 807 return; 808 } 809 810 EXPORT_SYMBOL(acpi_processor_unregister_performance); 811