1 /* 2 * (c) 2003-2012 Advanced Micro Devices, Inc. 3 * Your use of this code is subject to the terms and conditions of the 4 * GNU general public license version 2. See "COPYING" or 5 * http://www.gnu.org/licenses/gpl.html 6 * 7 * Maintainer: 8 * Andreas Herrmann <andreas.herrmann3@amd.com> 9 * 10 * Based on the powernow-k7.c module written by Dave Jones. 11 * (C) 2003 Dave Jones on behalf of SuSE Labs 12 * (C) 2004 Dominik Brodowski <linux@brodo.de> 13 * (C) 2004 Pavel Machek <pavel@ucw.cz> 14 * Licensed under the terms of the GNU GPL License version 2. 15 * Based upon datasheets & sample CPUs kindly provided by AMD. 16 * 17 * Valuable input gratefully received from Dave Jones, Pavel Machek, 18 * Dominik Brodowski, Jacob Shin, and others. 19 * Originally developed by Paul Devriendt. 20 * 21 * Processor information obtained from Chapter 9 (Power and Thermal 22 * Management) of the "BIOS and Kernel Developer's Guide (BKDG) for 23 * the AMD Athlon 64 and AMD Opteron Processors" and section "2.x 24 * Power Management" in BKDGs for newer AMD CPU families. 25 * 26 * Tables for specific CPUs can be inferred from AMD's processor 27 * power and thermal data sheets, (e.g. 30417.pdf, 30430.pdf, 43375.pdf) 28 */ 29 30 #include <linux/kernel.h> 31 #include <linux/smp.h> 32 #include <linux/module.h> 33 #include <linux/init.h> 34 #include <linux/cpufreq.h> 35 #include <linux/slab.h> 36 #include <linux/string.h> 37 #include <linux/cpumask.h> 38 #include <linux/sched.h> /* for current / set_cpus_allowed() */ 39 #include <linux/io.h> 40 #include <linux/delay.h> 41 42 #include <asm/msr.h> 43 44 #include <linux/acpi.h> 45 #include <linux/mutex.h> 46 #include <acpi/processor.h> 47 48 #define PFX "powernow-k8: " 49 #define VERSION "version 2.20.00" 50 #include "powernow-k8.h" 51 #include "mperf.h" 52 53 /* serialize freq changes */ 54 static DEFINE_MUTEX(fidvid_mutex); 55 56 static DEFINE_PER_CPU(struct powernow_k8_data *, powernow_data); 57 58 static int cpu_family = CPU_OPTERON; 59 60 /* array to map SW pstate number to acpi state */ 61 static u32 ps_to_as[8]; 62 63 /* core performance boost */ 64 static bool cpb_capable, cpb_enabled; 65 static struct msr __percpu *msrs; 66 67 static struct cpufreq_driver cpufreq_amd64_driver; 68 69 #ifndef CONFIG_SMP 70 static inline const struct cpumask *cpu_core_mask(int cpu) 71 { 72 return cpumask_of(0); 73 } 74 #endif 75 76 /* Return a frequency in MHz, given an input fid */ 77 static u32 find_freq_from_fid(u32 fid) 78 { 79 return 800 + (fid * 100); 80 } 81 82 /* Return a frequency in KHz, given an input fid */ 83 static u32 find_khz_freq_from_fid(u32 fid) 84 { 85 return 1000 * find_freq_from_fid(fid); 86 } 87 88 static u32 find_khz_freq_from_pstate(struct cpufreq_frequency_table *data, 89 u32 pstate) 90 { 91 return data[ps_to_as[pstate]].frequency; 92 } 93 94 /* Return the vco fid for an input fid 95 * 96 * Each "low" fid has corresponding "high" fid, and you can get to "low" fids 97 * only from corresponding high fids. This returns "high" fid corresponding to 98 * "low" one. 99 */ 100 static u32 convert_fid_to_vco_fid(u32 fid) 101 { 102 if (fid < HI_FID_TABLE_BOTTOM) 103 return 8 + (2 * fid); 104 else 105 return fid; 106 } 107 108 /* 109 * Return 1 if the pending bit is set. Unless we just instructed the processor 110 * to transition to a new state, seeing this bit set is really bad news. 111 */ 112 static int pending_bit_stuck(void) 113 { 114 u32 lo, hi; 115 116 if (cpu_family == CPU_HW_PSTATE) 117 return 0; 118 119 rdmsr(MSR_FIDVID_STATUS, lo, hi); 120 return lo & MSR_S_LO_CHANGE_PENDING ? 1 : 0; 121 } 122 123 /* 124 * Update the global current fid / vid values from the status msr. 125 * Returns 1 on error. 126 */ 127 static int query_current_values_with_pending_wait(struct powernow_k8_data *data) 128 { 129 u32 lo, hi; 130 u32 i = 0; 131 132 if (cpu_family == CPU_HW_PSTATE) { 133 rdmsr(MSR_PSTATE_STATUS, lo, hi); 134 i = lo & HW_PSTATE_MASK; 135 data->currpstate = i; 136 137 /* 138 * a workaround for family 11h erratum 311 might cause 139 * an "out-of-range Pstate if the core is in Pstate-0 140 */ 141 if ((boot_cpu_data.x86 == 0x11) && (i >= data->numps)) 142 data->currpstate = HW_PSTATE_0; 143 144 return 0; 145 } 146 do { 147 if (i++ > 10000) { 148 pr_debug("detected change pending stuck\n"); 149 return 1; 150 } 151 rdmsr(MSR_FIDVID_STATUS, lo, hi); 152 } while (lo & MSR_S_LO_CHANGE_PENDING); 153 154 data->currvid = hi & MSR_S_HI_CURRENT_VID; 155 data->currfid = lo & MSR_S_LO_CURRENT_FID; 156 157 return 0; 158 } 159 160 /* the isochronous relief time */ 161 static void count_off_irt(struct powernow_k8_data *data) 162 { 163 udelay((1 << data->irt) * 10); 164 return; 165 } 166 167 /* the voltage stabilization time */ 168 static void count_off_vst(struct powernow_k8_data *data) 169 { 170 udelay(data->vstable * VST_UNITS_20US); 171 return; 172 } 173 174 /* need to init the control msr to a safe value (for each cpu) */ 175 static void fidvid_msr_init(void) 176 { 177 u32 lo, hi; 178 u8 fid, vid; 179 180 rdmsr(MSR_FIDVID_STATUS, lo, hi); 181 vid = hi & MSR_S_HI_CURRENT_VID; 182 fid = lo & MSR_S_LO_CURRENT_FID; 183 lo = fid | (vid << MSR_C_LO_VID_SHIFT); 184 hi = MSR_C_HI_STP_GNT_BENIGN; 185 pr_debug("cpu%d, init lo 0x%x, hi 0x%x\n", smp_processor_id(), lo, hi); 186 wrmsr(MSR_FIDVID_CTL, lo, hi); 187 } 188 189 /* write the new fid value along with the other control fields to the msr */ 190 static int write_new_fid(struct powernow_k8_data *data, u32 fid) 191 { 192 u32 lo; 193 u32 savevid = data->currvid; 194 u32 i = 0; 195 196 if ((fid & INVALID_FID_MASK) || (data->currvid & INVALID_VID_MASK)) { 197 printk(KERN_ERR PFX "internal error - overflow on fid write\n"); 198 return 1; 199 } 200 201 lo = fid; 202 lo |= (data->currvid << MSR_C_LO_VID_SHIFT); 203 lo |= MSR_C_LO_INIT_FID_VID; 204 205 pr_debug("writing fid 0x%x, lo 0x%x, hi 0x%x\n", 206 fid, lo, data->plllock * PLL_LOCK_CONVERSION); 207 208 do { 209 wrmsr(MSR_FIDVID_CTL, lo, data->plllock * PLL_LOCK_CONVERSION); 210 if (i++ > 100) { 211 printk(KERN_ERR PFX 212 "Hardware error - pending bit very stuck - " 213 "no further pstate changes possible\n"); 214 return 1; 215 } 216 } while (query_current_values_with_pending_wait(data)); 217 218 count_off_irt(data); 219 220 if (savevid != data->currvid) { 221 printk(KERN_ERR PFX 222 "vid change on fid trans, old 0x%x, new 0x%x\n", 223 savevid, data->currvid); 224 return 1; 225 } 226 227 if (fid != data->currfid) { 228 printk(KERN_ERR PFX 229 "fid trans failed, fid 0x%x, curr 0x%x\n", fid, 230 data->currfid); 231 return 1; 232 } 233 234 return 0; 235 } 236 237 /* Write a new vid to the hardware */ 238 static int write_new_vid(struct powernow_k8_data *data, u32 vid) 239 { 240 u32 lo; 241 u32 savefid = data->currfid; 242 int i = 0; 243 244 if ((data->currfid & INVALID_FID_MASK) || (vid & INVALID_VID_MASK)) { 245 printk(KERN_ERR PFX "internal error - overflow on vid write\n"); 246 return 1; 247 } 248 249 lo = data->currfid; 250 lo |= (vid << MSR_C_LO_VID_SHIFT); 251 lo |= MSR_C_LO_INIT_FID_VID; 252 253 pr_debug("writing vid 0x%x, lo 0x%x, hi 0x%x\n", 254 vid, lo, STOP_GRANT_5NS); 255 256 do { 257 wrmsr(MSR_FIDVID_CTL, lo, STOP_GRANT_5NS); 258 if (i++ > 100) { 259 printk(KERN_ERR PFX "internal error - pending bit " 260 "very stuck - no further pstate " 261 "changes possible\n"); 262 return 1; 263 } 264 } while (query_current_values_with_pending_wait(data)); 265 266 if (savefid != data->currfid) { 267 printk(KERN_ERR PFX "fid changed on vid trans, old " 268 "0x%x new 0x%x\n", 269 savefid, data->currfid); 270 return 1; 271 } 272 273 if (vid != data->currvid) { 274 printk(KERN_ERR PFX "vid trans failed, vid 0x%x, " 275 "curr 0x%x\n", 276 vid, data->currvid); 277 return 1; 278 } 279 280 return 0; 281 } 282 283 /* 284 * Reduce the vid by the max of step or reqvid. 285 * Decreasing vid codes represent increasing voltages: 286 * vid of 0 is 1.550V, vid of 0x1e is 0.800V, vid of VID_OFF is off. 287 */ 288 static int decrease_vid_code_by_step(struct powernow_k8_data *data, 289 u32 reqvid, u32 step) 290 { 291 if ((data->currvid - reqvid) > step) 292 reqvid = data->currvid - step; 293 294 if (write_new_vid(data, reqvid)) 295 return 1; 296 297 count_off_vst(data); 298 299 return 0; 300 } 301 302 /* Change hardware pstate by single MSR write */ 303 static int transition_pstate(struct powernow_k8_data *data, u32 pstate) 304 { 305 wrmsr(MSR_PSTATE_CTRL, pstate, 0); 306 data->currpstate = pstate; 307 return 0; 308 } 309 310 /* Change Opteron/Athlon64 fid and vid, by the 3 phases. */ 311 static int transition_fid_vid(struct powernow_k8_data *data, 312 u32 reqfid, u32 reqvid) 313 { 314 if (core_voltage_pre_transition(data, reqvid, reqfid)) 315 return 1; 316 317 if (core_frequency_transition(data, reqfid)) 318 return 1; 319 320 if (core_voltage_post_transition(data, reqvid)) 321 return 1; 322 323 if (query_current_values_with_pending_wait(data)) 324 return 1; 325 326 if ((reqfid != data->currfid) || (reqvid != data->currvid)) { 327 printk(KERN_ERR PFX "failed (cpu%d): req 0x%x 0x%x, " 328 "curr 0x%x 0x%x\n", 329 smp_processor_id(), 330 reqfid, reqvid, data->currfid, data->currvid); 331 return 1; 332 } 333 334 pr_debug("transitioned (cpu%d): new fid 0x%x, vid 0x%x\n", 335 smp_processor_id(), data->currfid, data->currvid); 336 337 return 0; 338 } 339 340 /* Phase 1 - core voltage transition ... setup voltage */ 341 static int core_voltage_pre_transition(struct powernow_k8_data *data, 342 u32 reqvid, u32 reqfid) 343 { 344 u32 rvosteps = data->rvo; 345 u32 savefid = data->currfid; 346 u32 maxvid, lo, rvomult = 1; 347 348 pr_debug("ph1 (cpu%d): start, currfid 0x%x, currvid 0x%x, " 349 "reqvid 0x%x, rvo 0x%x\n", 350 smp_processor_id(), 351 data->currfid, data->currvid, reqvid, data->rvo); 352 353 if ((savefid < LO_FID_TABLE_TOP) && (reqfid < LO_FID_TABLE_TOP)) 354 rvomult = 2; 355 rvosteps *= rvomult; 356 rdmsr(MSR_FIDVID_STATUS, lo, maxvid); 357 maxvid = 0x1f & (maxvid >> 16); 358 pr_debug("ph1 maxvid=0x%x\n", maxvid); 359 if (reqvid < maxvid) /* lower numbers are higher voltages */ 360 reqvid = maxvid; 361 362 while (data->currvid > reqvid) { 363 pr_debug("ph1: curr 0x%x, req vid 0x%x\n", 364 data->currvid, reqvid); 365 if (decrease_vid_code_by_step(data, reqvid, data->vidmvs)) 366 return 1; 367 } 368 369 while ((rvosteps > 0) && 370 ((rvomult * data->rvo + data->currvid) > reqvid)) { 371 if (data->currvid == maxvid) { 372 rvosteps = 0; 373 } else { 374 pr_debug("ph1: changing vid for rvo, req 0x%x\n", 375 data->currvid - 1); 376 if (decrease_vid_code_by_step(data, data->currvid-1, 1)) 377 return 1; 378 rvosteps--; 379 } 380 } 381 382 if (query_current_values_with_pending_wait(data)) 383 return 1; 384 385 if (savefid != data->currfid) { 386 printk(KERN_ERR PFX "ph1 err, currfid changed 0x%x\n", 387 data->currfid); 388 return 1; 389 } 390 391 pr_debug("ph1 complete, currfid 0x%x, currvid 0x%x\n", 392 data->currfid, data->currvid); 393 394 return 0; 395 } 396 397 /* Phase 2 - core frequency transition */ 398 static int core_frequency_transition(struct powernow_k8_data *data, u32 reqfid) 399 { 400 u32 vcoreqfid, vcocurrfid, vcofiddiff; 401 u32 fid_interval, savevid = data->currvid; 402 403 if (data->currfid == reqfid) { 404 printk(KERN_ERR PFX "ph2 null fid transition 0x%x\n", 405 data->currfid); 406 return 0; 407 } 408 409 pr_debug("ph2 (cpu%d): starting, currfid 0x%x, currvid 0x%x, " 410 "reqfid 0x%x\n", 411 smp_processor_id(), 412 data->currfid, data->currvid, reqfid); 413 414 vcoreqfid = convert_fid_to_vco_fid(reqfid); 415 vcocurrfid = convert_fid_to_vco_fid(data->currfid); 416 vcofiddiff = vcocurrfid > vcoreqfid ? vcocurrfid - vcoreqfid 417 : vcoreqfid - vcocurrfid; 418 419 if ((reqfid <= LO_FID_TABLE_TOP) && (data->currfid <= LO_FID_TABLE_TOP)) 420 vcofiddiff = 0; 421 422 while (vcofiddiff > 2) { 423 (data->currfid & 1) ? (fid_interval = 1) : (fid_interval = 2); 424 425 if (reqfid > data->currfid) { 426 if (data->currfid > LO_FID_TABLE_TOP) { 427 if (write_new_fid(data, 428 data->currfid + fid_interval)) 429 return 1; 430 } else { 431 if (write_new_fid 432 (data, 433 2 + convert_fid_to_vco_fid(data->currfid))) 434 return 1; 435 } 436 } else { 437 if (write_new_fid(data, data->currfid - fid_interval)) 438 return 1; 439 } 440 441 vcocurrfid = convert_fid_to_vco_fid(data->currfid); 442 vcofiddiff = vcocurrfid > vcoreqfid ? vcocurrfid - vcoreqfid 443 : vcoreqfid - vcocurrfid; 444 } 445 446 if (write_new_fid(data, reqfid)) 447 return 1; 448 449 if (query_current_values_with_pending_wait(data)) 450 return 1; 451 452 if (data->currfid != reqfid) { 453 printk(KERN_ERR PFX 454 "ph2: mismatch, failed fid transition, " 455 "curr 0x%x, req 0x%x\n", 456 data->currfid, reqfid); 457 return 1; 458 } 459 460 if (savevid != data->currvid) { 461 printk(KERN_ERR PFX "ph2: vid changed, save 0x%x, curr 0x%x\n", 462 savevid, data->currvid); 463 return 1; 464 } 465 466 pr_debug("ph2 complete, currfid 0x%x, currvid 0x%x\n", 467 data->currfid, data->currvid); 468 469 return 0; 470 } 471 472 /* Phase 3 - core voltage transition flow ... jump to the final vid. */ 473 static int core_voltage_post_transition(struct powernow_k8_data *data, 474 u32 reqvid) 475 { 476 u32 savefid = data->currfid; 477 u32 savereqvid = reqvid; 478 479 pr_debug("ph3 (cpu%d): starting, currfid 0x%x, currvid 0x%x\n", 480 smp_processor_id(), 481 data->currfid, data->currvid); 482 483 if (reqvid != data->currvid) { 484 if (write_new_vid(data, reqvid)) 485 return 1; 486 487 if (savefid != data->currfid) { 488 printk(KERN_ERR PFX 489 "ph3: bad fid change, save 0x%x, curr 0x%x\n", 490 savefid, data->currfid); 491 return 1; 492 } 493 494 if (data->currvid != reqvid) { 495 printk(KERN_ERR PFX 496 "ph3: failed vid transition\n, " 497 "req 0x%x, curr 0x%x", 498 reqvid, data->currvid); 499 return 1; 500 } 501 } 502 503 if (query_current_values_with_pending_wait(data)) 504 return 1; 505 506 if (savereqvid != data->currvid) { 507 pr_debug("ph3 failed, currvid 0x%x\n", data->currvid); 508 return 1; 509 } 510 511 if (savefid != data->currfid) { 512 pr_debug("ph3 failed, currfid changed 0x%x\n", 513 data->currfid); 514 return 1; 515 } 516 517 pr_debug("ph3 complete, currfid 0x%x, currvid 0x%x\n", 518 data->currfid, data->currvid); 519 520 return 0; 521 } 522 523 static void check_supported_cpu(void *_rc) 524 { 525 u32 eax, ebx, ecx, edx; 526 int *rc = _rc; 527 528 *rc = -ENODEV; 529 530 if (__this_cpu_read(cpu_info.x86_vendor) != X86_VENDOR_AMD) 531 return; 532 533 eax = cpuid_eax(CPUID_PROCESSOR_SIGNATURE); 534 if (((eax & CPUID_XFAM) != CPUID_XFAM_K8) && 535 ((eax & CPUID_XFAM) < CPUID_XFAM_10H)) 536 return; 537 538 if ((eax & CPUID_XFAM) == CPUID_XFAM_K8) { 539 if (((eax & CPUID_USE_XFAM_XMOD) != CPUID_USE_XFAM_XMOD) || 540 ((eax & CPUID_XMOD) > CPUID_XMOD_REV_MASK)) { 541 printk(KERN_INFO PFX 542 "Processor cpuid %x not supported\n", eax); 543 return; 544 } 545 546 eax = cpuid_eax(CPUID_GET_MAX_CAPABILITIES); 547 if (eax < CPUID_FREQ_VOLT_CAPABILITIES) { 548 printk(KERN_INFO PFX 549 "No frequency change capabilities detected\n"); 550 return; 551 } 552 553 cpuid(CPUID_FREQ_VOLT_CAPABILITIES, &eax, &ebx, &ecx, &edx); 554 if ((edx & P_STATE_TRANSITION_CAPABLE) 555 != P_STATE_TRANSITION_CAPABLE) { 556 printk(KERN_INFO PFX 557 "Power state transitions not supported\n"); 558 return; 559 } 560 } else { /* must be a HW Pstate capable processor */ 561 cpuid(CPUID_FREQ_VOLT_CAPABILITIES, &eax, &ebx, &ecx, &edx); 562 if ((edx & USE_HW_PSTATE) == USE_HW_PSTATE) 563 cpu_family = CPU_HW_PSTATE; 564 else 565 return; 566 } 567 568 *rc = 0; 569 } 570 571 static int check_pst_table(struct powernow_k8_data *data, struct pst_s *pst, 572 u8 maxvid) 573 { 574 unsigned int j; 575 u8 lastfid = 0xff; 576 577 for (j = 0; j < data->numps; j++) { 578 if (pst[j].vid > LEAST_VID) { 579 printk(KERN_ERR FW_BUG PFX "vid %d invalid : 0x%x\n", 580 j, pst[j].vid); 581 return -EINVAL; 582 } 583 if (pst[j].vid < data->rvo) { 584 /* vid + rvo >= 0 */ 585 printk(KERN_ERR FW_BUG PFX "0 vid exceeded with pstate" 586 " %d\n", j); 587 return -ENODEV; 588 } 589 if (pst[j].vid < maxvid + data->rvo) { 590 /* vid + rvo >= maxvid */ 591 printk(KERN_ERR FW_BUG PFX "maxvid exceeded with pstate" 592 " %d\n", j); 593 return -ENODEV; 594 } 595 if (pst[j].fid > MAX_FID) { 596 printk(KERN_ERR FW_BUG PFX "maxfid exceeded with pstate" 597 " %d\n", j); 598 return -ENODEV; 599 } 600 if (j && (pst[j].fid < HI_FID_TABLE_BOTTOM)) { 601 /* Only first fid is allowed to be in "low" range */ 602 printk(KERN_ERR FW_BUG PFX "two low fids - %d : " 603 "0x%x\n", j, pst[j].fid); 604 return -EINVAL; 605 } 606 if (pst[j].fid < lastfid) 607 lastfid = pst[j].fid; 608 } 609 if (lastfid & 1) { 610 printk(KERN_ERR FW_BUG PFX "lastfid invalid\n"); 611 return -EINVAL; 612 } 613 if (lastfid > LO_FID_TABLE_TOP) 614 printk(KERN_INFO FW_BUG PFX 615 "first fid not from lo freq table\n"); 616 617 return 0; 618 } 619 620 static void invalidate_entry(struct cpufreq_frequency_table *powernow_table, 621 unsigned int entry) 622 { 623 powernow_table[entry].frequency = CPUFREQ_ENTRY_INVALID; 624 } 625 626 static void print_basics(struct powernow_k8_data *data) 627 { 628 int j; 629 for (j = 0; j < data->numps; j++) { 630 if (data->powernow_table[j].frequency != 631 CPUFREQ_ENTRY_INVALID) { 632 if (cpu_family == CPU_HW_PSTATE) { 633 printk(KERN_INFO PFX 634 " %d : pstate %d (%d MHz)\n", j, 635 data->powernow_table[j].index, 636 data->powernow_table[j].frequency/1000); 637 } else { 638 printk(KERN_INFO PFX 639 "fid 0x%x (%d MHz), vid 0x%x\n", 640 data->powernow_table[j].index & 0xff, 641 data->powernow_table[j].frequency/1000, 642 data->powernow_table[j].index >> 8); 643 } 644 } 645 } 646 if (data->batps) 647 printk(KERN_INFO PFX "Only %d pstates on battery\n", 648 data->batps); 649 } 650 651 static u32 freq_from_fid_did(u32 fid, u32 did) 652 { 653 u32 mhz = 0; 654 655 if (boot_cpu_data.x86 == 0x10) 656 mhz = (100 * (fid + 0x10)) >> did; 657 else if (boot_cpu_data.x86 == 0x11) 658 mhz = (100 * (fid + 8)) >> did; 659 else 660 BUG(); 661 662 return mhz * 1000; 663 } 664 665 static int fill_powernow_table(struct powernow_k8_data *data, 666 struct pst_s *pst, u8 maxvid) 667 { 668 struct cpufreq_frequency_table *powernow_table; 669 unsigned int j; 670 671 if (data->batps) { 672 /* use ACPI support to get full speed on mains power */ 673 printk(KERN_WARNING PFX 674 "Only %d pstates usable (use ACPI driver for full " 675 "range\n", data->batps); 676 data->numps = data->batps; 677 } 678 679 for (j = 1; j < data->numps; j++) { 680 if (pst[j-1].fid >= pst[j].fid) { 681 printk(KERN_ERR PFX "PST out of sequence\n"); 682 return -EINVAL; 683 } 684 } 685 686 if (data->numps < 2) { 687 printk(KERN_ERR PFX "no p states to transition\n"); 688 return -ENODEV; 689 } 690 691 if (check_pst_table(data, pst, maxvid)) 692 return -EINVAL; 693 694 powernow_table = kmalloc((sizeof(struct cpufreq_frequency_table) 695 * (data->numps + 1)), GFP_KERNEL); 696 if (!powernow_table) { 697 printk(KERN_ERR PFX "powernow_table memory alloc failure\n"); 698 return -ENOMEM; 699 } 700 701 for (j = 0; j < data->numps; j++) { 702 int freq; 703 powernow_table[j].index = pst[j].fid; /* lower 8 bits */ 704 powernow_table[j].index |= (pst[j].vid << 8); /* upper 8 bits */ 705 freq = find_khz_freq_from_fid(pst[j].fid); 706 powernow_table[j].frequency = freq; 707 } 708 powernow_table[data->numps].frequency = CPUFREQ_TABLE_END; 709 powernow_table[data->numps].index = 0; 710 711 if (query_current_values_with_pending_wait(data)) { 712 kfree(powernow_table); 713 return -EIO; 714 } 715 716 pr_debug("cfid 0x%x, cvid 0x%x\n", data->currfid, data->currvid); 717 data->powernow_table = powernow_table; 718 if (cpumask_first(cpu_core_mask(data->cpu)) == data->cpu) 719 print_basics(data); 720 721 for (j = 0; j < data->numps; j++) 722 if ((pst[j].fid == data->currfid) && 723 (pst[j].vid == data->currvid)) 724 return 0; 725 726 pr_debug("currfid/vid do not match PST, ignoring\n"); 727 return 0; 728 } 729 730 /* Find and validate the PSB/PST table in BIOS. */ 731 static int find_psb_table(struct powernow_k8_data *data) 732 { 733 struct psb_s *psb; 734 unsigned int i; 735 u32 mvs; 736 u8 maxvid; 737 u32 cpst = 0; 738 u32 thiscpuid; 739 740 for (i = 0xc0000; i < 0xffff0; i += 0x10) { 741 /* Scan BIOS looking for the signature. */ 742 /* It can not be at ffff0 - it is too big. */ 743 744 psb = phys_to_virt(i); 745 if (memcmp(psb, PSB_ID_STRING, PSB_ID_STRING_LEN) != 0) 746 continue; 747 748 pr_debug("found PSB header at 0x%p\n", psb); 749 750 pr_debug("table vers: 0x%x\n", psb->tableversion); 751 if (psb->tableversion != PSB_VERSION_1_4) { 752 printk(KERN_ERR FW_BUG PFX "PSB table is not v1.4\n"); 753 return -ENODEV; 754 } 755 756 pr_debug("flags: 0x%x\n", psb->flags1); 757 if (psb->flags1) { 758 printk(KERN_ERR FW_BUG PFX "unknown flags\n"); 759 return -ENODEV; 760 } 761 762 data->vstable = psb->vstable; 763 pr_debug("voltage stabilization time: %d(*20us)\n", 764 data->vstable); 765 766 pr_debug("flags2: 0x%x\n", psb->flags2); 767 data->rvo = psb->flags2 & 3; 768 data->irt = ((psb->flags2) >> 2) & 3; 769 mvs = ((psb->flags2) >> 4) & 3; 770 data->vidmvs = 1 << mvs; 771 data->batps = ((psb->flags2) >> 6) & 3; 772 773 pr_debug("ramp voltage offset: %d\n", data->rvo); 774 pr_debug("isochronous relief time: %d\n", data->irt); 775 pr_debug("maximum voltage step: %d - 0x%x\n", mvs, data->vidmvs); 776 777 pr_debug("numpst: 0x%x\n", psb->num_tables); 778 cpst = psb->num_tables; 779 if ((psb->cpuid == 0x00000fc0) || 780 (psb->cpuid == 0x00000fe0)) { 781 thiscpuid = cpuid_eax(CPUID_PROCESSOR_SIGNATURE); 782 if ((thiscpuid == 0x00000fc0) || 783 (thiscpuid == 0x00000fe0)) 784 cpst = 1; 785 } 786 if (cpst != 1) { 787 printk(KERN_ERR FW_BUG PFX "numpst must be 1\n"); 788 return -ENODEV; 789 } 790 791 data->plllock = psb->plllocktime; 792 pr_debug("plllocktime: 0x%x (units 1us)\n", psb->plllocktime); 793 pr_debug("maxfid: 0x%x\n", psb->maxfid); 794 pr_debug("maxvid: 0x%x\n", psb->maxvid); 795 maxvid = psb->maxvid; 796 797 data->numps = psb->numps; 798 pr_debug("numpstates: 0x%x\n", data->numps); 799 return fill_powernow_table(data, 800 (struct pst_s *)(psb+1), maxvid); 801 } 802 /* 803 * If you see this message, complain to BIOS manufacturer. If 804 * he tells you "we do not support Linux" or some similar 805 * nonsense, remember that Windows 2000 uses the same legacy 806 * mechanism that the old Linux PSB driver uses. Tell them it 807 * is broken with Windows 2000. 808 * 809 * The reference to the AMD documentation is chapter 9 in the 810 * BIOS and Kernel Developer's Guide, which is available on 811 * www.amd.com 812 */ 813 printk(KERN_ERR FW_BUG PFX "No PSB or ACPI _PSS objects\n"); 814 printk(KERN_ERR PFX "Make sure that your BIOS is up to date" 815 " and Cool'N'Quiet support is enabled in BIOS setup\n"); 816 return -ENODEV; 817 } 818 819 static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, 820 unsigned int index) 821 { 822 u64 control; 823 824 if (!data->acpi_data.state_count || (cpu_family == CPU_HW_PSTATE)) 825 return; 826 827 control = data->acpi_data.states[index].control; 828 data->irt = (control >> IRT_SHIFT) & IRT_MASK; 829 data->rvo = (control >> RVO_SHIFT) & RVO_MASK; 830 data->exttype = (control >> EXT_TYPE_SHIFT) & EXT_TYPE_MASK; 831 data->plllock = (control >> PLL_L_SHIFT) & PLL_L_MASK; 832 data->vidmvs = 1 << ((control >> MVS_SHIFT) & MVS_MASK); 833 data->vstable = (control >> VST_SHIFT) & VST_MASK; 834 } 835 836 static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) 837 { 838 struct cpufreq_frequency_table *powernow_table; 839 int ret_val = -ENODEV; 840 u64 control, status; 841 842 if (acpi_processor_register_performance(&data->acpi_data, data->cpu)) { 843 pr_debug("register performance failed: bad ACPI data\n"); 844 return -EIO; 845 } 846 847 /* verify the data contained in the ACPI structures */ 848 if (data->acpi_data.state_count <= 1) { 849 pr_debug("No ACPI P-States\n"); 850 goto err_out; 851 } 852 853 control = data->acpi_data.control_register.space_id; 854 status = data->acpi_data.status_register.space_id; 855 856 if ((control != ACPI_ADR_SPACE_FIXED_HARDWARE) || 857 (status != ACPI_ADR_SPACE_FIXED_HARDWARE)) { 858 pr_debug("Invalid control/status registers (%llx - %llx)\n", 859 control, status); 860 goto err_out; 861 } 862 863 /* fill in data->powernow_table */ 864 powernow_table = kmalloc((sizeof(struct cpufreq_frequency_table) 865 * (data->acpi_data.state_count + 1)), GFP_KERNEL); 866 if (!powernow_table) { 867 pr_debug("powernow_table memory alloc failure\n"); 868 goto err_out; 869 } 870 871 /* fill in data */ 872 data->numps = data->acpi_data.state_count; 873 powernow_k8_acpi_pst_values(data, 0); 874 875 if (cpu_family == CPU_HW_PSTATE) 876 ret_val = fill_powernow_table_pstate(data, powernow_table); 877 else 878 ret_val = fill_powernow_table_fidvid(data, powernow_table); 879 if (ret_val) 880 goto err_out_mem; 881 882 powernow_table[data->acpi_data.state_count].frequency = 883 CPUFREQ_TABLE_END; 884 powernow_table[data->acpi_data.state_count].index = 0; 885 data->powernow_table = powernow_table; 886 887 if (cpumask_first(cpu_core_mask(data->cpu)) == data->cpu) 888 print_basics(data); 889 890 /* notify BIOS that we exist */ 891 acpi_processor_notify_smm(THIS_MODULE); 892 893 if (!zalloc_cpumask_var(&data->acpi_data.shared_cpu_map, GFP_KERNEL)) { 894 printk(KERN_ERR PFX 895 "unable to alloc powernow_k8_data cpumask\n"); 896 ret_val = -ENOMEM; 897 goto err_out_mem; 898 } 899 900 return 0; 901 902 err_out_mem: 903 kfree(powernow_table); 904 905 err_out: 906 acpi_processor_unregister_performance(&data->acpi_data, data->cpu); 907 908 /* data->acpi_data.state_count informs us at ->exit() 909 * whether ACPI was used */ 910 data->acpi_data.state_count = 0; 911 912 return ret_val; 913 } 914 915 static int fill_powernow_table_pstate(struct powernow_k8_data *data, 916 struct cpufreq_frequency_table *powernow_table) 917 { 918 int i; 919 u32 hi = 0, lo = 0; 920 rdmsr(MSR_PSTATE_CUR_LIMIT, lo, hi); 921 data->max_hw_pstate = (lo & HW_PSTATE_MAX_MASK) >> HW_PSTATE_MAX_SHIFT; 922 923 for (i = 0; i < data->acpi_data.state_count; i++) { 924 u32 index; 925 926 index = data->acpi_data.states[i].control & HW_PSTATE_MASK; 927 if (index > data->max_hw_pstate) { 928 printk(KERN_ERR PFX "invalid pstate %d - " 929 "bad value %d.\n", i, index); 930 printk(KERN_ERR PFX "Please report to BIOS " 931 "manufacturer\n"); 932 invalidate_entry(powernow_table, i); 933 continue; 934 } 935 936 ps_to_as[index] = i; 937 938 /* Frequency may be rounded for these */ 939 if ((boot_cpu_data.x86 == 0x10 && boot_cpu_data.x86_model < 10) 940 || boot_cpu_data.x86 == 0x11) { 941 942 rdmsr(MSR_PSTATE_DEF_BASE + index, lo, hi); 943 if (!(hi & HW_PSTATE_VALID_MASK)) { 944 pr_debug("invalid pstate %d, ignoring\n", index); 945 invalidate_entry(powernow_table, i); 946 continue; 947 } 948 949 powernow_table[i].frequency = 950 freq_from_fid_did(lo & 0x3f, (lo >> 6) & 7); 951 } else 952 powernow_table[i].frequency = 953 data->acpi_data.states[i].core_frequency * 1000; 954 955 powernow_table[i].index = index; 956 } 957 return 0; 958 } 959 960 static int fill_powernow_table_fidvid(struct powernow_k8_data *data, 961 struct cpufreq_frequency_table *powernow_table) 962 { 963 int i; 964 965 for (i = 0; i < data->acpi_data.state_count; i++) { 966 u32 fid; 967 u32 vid; 968 u32 freq, index; 969 u64 status, control; 970 971 if (data->exttype) { 972 status = data->acpi_data.states[i].status; 973 fid = status & EXT_FID_MASK; 974 vid = (status >> VID_SHIFT) & EXT_VID_MASK; 975 } else { 976 control = data->acpi_data.states[i].control; 977 fid = control & FID_MASK; 978 vid = (control >> VID_SHIFT) & VID_MASK; 979 } 980 981 pr_debug(" %d : fid 0x%x, vid 0x%x\n", i, fid, vid); 982 983 index = fid | (vid<<8); 984 powernow_table[i].index = index; 985 986 freq = find_khz_freq_from_fid(fid); 987 powernow_table[i].frequency = freq; 988 989 /* verify frequency is OK */ 990 if ((freq > (MAX_FREQ * 1000)) || (freq < (MIN_FREQ * 1000))) { 991 pr_debug("invalid freq %u kHz, ignoring\n", freq); 992 invalidate_entry(powernow_table, i); 993 continue; 994 } 995 996 /* verify voltage is OK - 997 * BIOSs are using "off" to indicate invalid */ 998 if (vid == VID_OFF) { 999 pr_debug("invalid vid %u, ignoring\n", vid); 1000 invalidate_entry(powernow_table, i); 1001 continue; 1002 } 1003 1004 if (freq != (data->acpi_data.states[i].core_frequency * 1000)) { 1005 printk(KERN_INFO PFX "invalid freq entries " 1006 "%u kHz vs. %u kHz\n", freq, 1007 (unsigned int) 1008 (data->acpi_data.states[i].core_frequency 1009 * 1000)); 1010 invalidate_entry(powernow_table, i); 1011 continue; 1012 } 1013 } 1014 return 0; 1015 } 1016 1017 static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data) 1018 { 1019 if (data->acpi_data.state_count) 1020 acpi_processor_unregister_performance(&data->acpi_data, 1021 data->cpu); 1022 free_cpumask_var(data->acpi_data.shared_cpu_map); 1023 } 1024 1025 static int get_transition_latency(struct powernow_k8_data *data) 1026 { 1027 int max_latency = 0; 1028 int i; 1029 for (i = 0; i < data->acpi_data.state_count; i++) { 1030 int cur_latency = data->acpi_data.states[i].transition_latency 1031 + data->acpi_data.states[i].bus_master_latency; 1032 if (cur_latency > max_latency) 1033 max_latency = cur_latency; 1034 } 1035 if (max_latency == 0) { 1036 /* 1037 * Fam 11h and later may return 0 as transition latency. This 1038 * is intended and means "very fast". While cpufreq core and 1039 * governors currently can handle that gracefully, better set it 1040 * to 1 to avoid problems in the future. 1041 */ 1042 if (boot_cpu_data.x86 < 0x11) 1043 printk(KERN_ERR FW_WARN PFX "Invalid zero transition " 1044 "latency\n"); 1045 max_latency = 1; 1046 } 1047 /* value in usecs, needs to be in nanoseconds */ 1048 return 1000 * max_latency; 1049 } 1050 1051 /* Take a frequency, and issue the fid/vid transition command */ 1052 static int transition_frequency_fidvid(struct powernow_k8_data *data, 1053 unsigned int index) 1054 { 1055 u32 fid = 0; 1056 u32 vid = 0; 1057 int res, i; 1058 struct cpufreq_freqs freqs; 1059 1060 pr_debug("cpu %d transition to index %u\n", smp_processor_id(), index); 1061 1062 /* fid/vid correctness check for k8 */ 1063 /* fid are the lower 8 bits of the index we stored into 1064 * the cpufreq frequency table in find_psb_table, vid 1065 * are the upper 8 bits. 1066 */ 1067 fid = data->powernow_table[index].index & 0xFF; 1068 vid = (data->powernow_table[index].index & 0xFF00) >> 8; 1069 1070 pr_debug("table matched fid 0x%x, giving vid 0x%x\n", fid, vid); 1071 1072 if (query_current_values_with_pending_wait(data)) 1073 return 1; 1074 1075 if ((data->currvid == vid) && (data->currfid == fid)) { 1076 pr_debug("target matches current values (fid 0x%x, vid 0x%x)\n", 1077 fid, vid); 1078 return 0; 1079 } 1080 1081 pr_debug("cpu %d, changing to fid 0x%x, vid 0x%x\n", 1082 smp_processor_id(), fid, vid); 1083 freqs.old = find_khz_freq_from_fid(data->currfid); 1084 freqs.new = find_khz_freq_from_fid(fid); 1085 1086 for_each_cpu(i, data->available_cores) { 1087 freqs.cpu = i; 1088 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); 1089 } 1090 1091 res = transition_fid_vid(data, fid, vid); 1092 if (res) 1093 return res; 1094 1095 freqs.new = find_khz_freq_from_fid(data->currfid); 1096 1097 for_each_cpu(i, data->available_cores) { 1098 freqs.cpu = i; 1099 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 1100 } 1101 return res; 1102 } 1103 1104 /* Take a frequency, and issue the hardware pstate transition command */ 1105 static int transition_frequency_pstate(struct powernow_k8_data *data, 1106 unsigned int index) 1107 { 1108 u32 pstate = 0; 1109 int res, i; 1110 struct cpufreq_freqs freqs; 1111 1112 pr_debug("cpu %d transition to index %u\n", smp_processor_id(), index); 1113 1114 /* get MSR index for hardware pstate transition */ 1115 pstate = index & HW_PSTATE_MASK; 1116 if (pstate > data->max_hw_pstate) 1117 return -EINVAL; 1118 1119 freqs.old = find_khz_freq_from_pstate(data->powernow_table, 1120 data->currpstate); 1121 freqs.new = find_khz_freq_from_pstate(data->powernow_table, pstate); 1122 1123 for_each_cpu(i, data->available_cores) { 1124 freqs.cpu = i; 1125 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); 1126 } 1127 1128 res = transition_pstate(data, pstate); 1129 freqs.new = find_khz_freq_from_pstate(data->powernow_table, pstate); 1130 1131 for_each_cpu(i, data->available_cores) { 1132 freqs.cpu = i; 1133 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 1134 } 1135 return res; 1136 } 1137 1138 /* Driver entry point to switch to the target frequency */ 1139 static int powernowk8_target(struct cpufreq_policy *pol, 1140 unsigned targfreq, unsigned relation) 1141 { 1142 cpumask_var_t oldmask; 1143 struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu); 1144 u32 checkfid; 1145 u32 checkvid; 1146 unsigned int newstate; 1147 int ret = -EIO; 1148 1149 if (!data) 1150 return -EINVAL; 1151 1152 checkfid = data->currfid; 1153 checkvid = data->currvid; 1154 1155 /* only run on specific CPU from here on. */ 1156 /* This is poor form: use a workqueue or smp_call_function_single */ 1157 if (!alloc_cpumask_var(&oldmask, GFP_KERNEL)) 1158 return -ENOMEM; 1159 1160 cpumask_copy(oldmask, tsk_cpus_allowed(current)); 1161 set_cpus_allowed_ptr(current, cpumask_of(pol->cpu)); 1162 1163 if (smp_processor_id() != pol->cpu) { 1164 printk(KERN_ERR PFX "limiting to cpu %u failed\n", pol->cpu); 1165 goto err_out; 1166 } 1167 1168 if (pending_bit_stuck()) { 1169 printk(KERN_ERR PFX "failing targ, change pending bit set\n"); 1170 goto err_out; 1171 } 1172 1173 pr_debug("targ: cpu %d, %d kHz, min %d, max %d, relation %d\n", 1174 pol->cpu, targfreq, pol->min, pol->max, relation); 1175 1176 if (query_current_values_with_pending_wait(data)) 1177 goto err_out; 1178 1179 if (cpu_family != CPU_HW_PSTATE) { 1180 pr_debug("targ: curr fid 0x%x, vid 0x%x\n", 1181 data->currfid, data->currvid); 1182 1183 if ((checkvid != data->currvid) || 1184 (checkfid != data->currfid)) { 1185 printk(KERN_INFO PFX 1186 "error - out of sync, fix 0x%x 0x%x, " 1187 "vid 0x%x 0x%x\n", 1188 checkfid, data->currfid, 1189 checkvid, data->currvid); 1190 } 1191 } 1192 1193 if (cpufreq_frequency_table_target(pol, data->powernow_table, 1194 targfreq, relation, &newstate)) 1195 goto err_out; 1196 1197 mutex_lock(&fidvid_mutex); 1198 1199 powernow_k8_acpi_pst_values(data, newstate); 1200 1201 if (cpu_family == CPU_HW_PSTATE) 1202 ret = transition_frequency_pstate(data, 1203 data->powernow_table[newstate].index); 1204 else 1205 ret = transition_frequency_fidvid(data, newstate); 1206 if (ret) { 1207 printk(KERN_ERR PFX "transition frequency failed\n"); 1208 ret = 1; 1209 mutex_unlock(&fidvid_mutex); 1210 goto err_out; 1211 } 1212 mutex_unlock(&fidvid_mutex); 1213 1214 if (cpu_family == CPU_HW_PSTATE) 1215 pol->cur = find_khz_freq_from_pstate(data->powernow_table, 1216 data->powernow_table[newstate].index); 1217 else 1218 pol->cur = find_khz_freq_from_fid(data->currfid); 1219 ret = 0; 1220 1221 err_out: 1222 set_cpus_allowed_ptr(current, oldmask); 1223 free_cpumask_var(oldmask); 1224 return ret; 1225 } 1226 1227 /* Driver entry point to verify the policy and range of frequencies */ 1228 static int powernowk8_verify(struct cpufreq_policy *pol) 1229 { 1230 struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu); 1231 1232 if (!data) 1233 return -EINVAL; 1234 1235 return cpufreq_frequency_table_verify(pol, data->powernow_table); 1236 } 1237 1238 struct init_on_cpu { 1239 struct powernow_k8_data *data; 1240 int rc; 1241 }; 1242 1243 static void __cpuinit powernowk8_cpu_init_on_cpu(void *_init_on_cpu) 1244 { 1245 struct init_on_cpu *init_on_cpu = _init_on_cpu; 1246 1247 if (pending_bit_stuck()) { 1248 printk(KERN_ERR PFX "failing init, change pending bit set\n"); 1249 init_on_cpu->rc = -ENODEV; 1250 return; 1251 } 1252 1253 if (query_current_values_with_pending_wait(init_on_cpu->data)) { 1254 init_on_cpu->rc = -ENODEV; 1255 return; 1256 } 1257 1258 if (cpu_family == CPU_OPTERON) 1259 fidvid_msr_init(); 1260 1261 init_on_cpu->rc = 0; 1262 } 1263 1264 /* per CPU init entry point to the driver */ 1265 static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol) 1266 { 1267 static const char ACPI_PSS_BIOS_BUG_MSG[] = 1268 KERN_ERR FW_BUG PFX "No compatible ACPI _PSS objects found.\n" 1269 FW_BUG PFX "Try again with latest BIOS.\n"; 1270 struct powernow_k8_data *data; 1271 struct init_on_cpu init_on_cpu; 1272 int rc; 1273 struct cpuinfo_x86 *c = &cpu_data(pol->cpu); 1274 1275 if (!cpu_online(pol->cpu)) 1276 return -ENODEV; 1277 1278 smp_call_function_single(pol->cpu, check_supported_cpu, &rc, 1); 1279 if (rc) 1280 return -ENODEV; 1281 1282 data = kzalloc(sizeof(struct powernow_k8_data), GFP_KERNEL); 1283 if (!data) { 1284 printk(KERN_ERR PFX "unable to alloc powernow_k8_data"); 1285 return -ENOMEM; 1286 } 1287 1288 data->cpu = pol->cpu; 1289 data->currpstate = HW_PSTATE_INVALID; 1290 1291 if (powernow_k8_cpu_init_acpi(data)) { 1292 /* 1293 * Use the PSB BIOS structure. This is only available on 1294 * an UP version, and is deprecated by AMD. 1295 */ 1296 if (num_online_cpus() != 1) { 1297 printk_once(ACPI_PSS_BIOS_BUG_MSG); 1298 goto err_out; 1299 } 1300 if (pol->cpu != 0) { 1301 printk(KERN_ERR FW_BUG PFX "No ACPI _PSS objects for " 1302 "CPU other than CPU0. Complain to your BIOS " 1303 "vendor.\n"); 1304 goto err_out; 1305 } 1306 rc = find_psb_table(data); 1307 if (rc) 1308 goto err_out; 1309 1310 /* Take a crude guess here. 1311 * That guess was in microseconds, so multiply with 1000 */ 1312 pol->cpuinfo.transition_latency = ( 1313 ((data->rvo + 8) * data->vstable * VST_UNITS_20US) + 1314 ((1 << data->irt) * 30)) * 1000; 1315 } else /* ACPI _PSS objects available */ 1316 pol->cpuinfo.transition_latency = get_transition_latency(data); 1317 1318 /* only run on specific CPU from here on */ 1319 init_on_cpu.data = data; 1320 smp_call_function_single(data->cpu, powernowk8_cpu_init_on_cpu, 1321 &init_on_cpu, 1); 1322 rc = init_on_cpu.rc; 1323 if (rc != 0) 1324 goto err_out_exit_acpi; 1325 1326 if (cpu_family == CPU_HW_PSTATE) 1327 cpumask_copy(pol->cpus, cpumask_of(pol->cpu)); 1328 else 1329 cpumask_copy(pol->cpus, cpu_core_mask(pol->cpu)); 1330 data->available_cores = pol->cpus; 1331 1332 if (cpu_family == CPU_HW_PSTATE) 1333 pol->cur = find_khz_freq_from_pstate(data->powernow_table, 1334 data->currpstate); 1335 else 1336 pol->cur = find_khz_freq_from_fid(data->currfid); 1337 pr_debug("policy current frequency %d kHz\n", pol->cur); 1338 1339 /* min/max the cpu is capable of */ 1340 if (cpufreq_frequency_table_cpuinfo(pol, data->powernow_table)) { 1341 printk(KERN_ERR FW_BUG PFX "invalid powernow_table\n"); 1342 powernow_k8_cpu_exit_acpi(data); 1343 kfree(data->powernow_table); 1344 kfree(data); 1345 return -EINVAL; 1346 } 1347 1348 /* Check for APERF/MPERF support in hardware */ 1349 if (cpu_has(c, X86_FEATURE_APERFMPERF)) 1350 cpufreq_amd64_driver.getavg = cpufreq_get_measured_perf; 1351 1352 cpufreq_frequency_table_get_attr(data->powernow_table, pol->cpu); 1353 1354 if (cpu_family == CPU_HW_PSTATE) 1355 pr_debug("cpu_init done, current pstate 0x%x\n", 1356 data->currpstate); 1357 else 1358 pr_debug("cpu_init done, current fid 0x%x, vid 0x%x\n", 1359 data->currfid, data->currvid); 1360 1361 per_cpu(powernow_data, pol->cpu) = data; 1362 1363 return 0; 1364 1365 err_out_exit_acpi: 1366 powernow_k8_cpu_exit_acpi(data); 1367 1368 err_out: 1369 kfree(data); 1370 return -ENODEV; 1371 } 1372 1373 static int __devexit powernowk8_cpu_exit(struct cpufreq_policy *pol) 1374 { 1375 struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu); 1376 1377 if (!data) 1378 return -EINVAL; 1379 1380 powernow_k8_cpu_exit_acpi(data); 1381 1382 cpufreq_frequency_table_put_attr(pol->cpu); 1383 1384 kfree(data->powernow_table); 1385 kfree(data); 1386 per_cpu(powernow_data, pol->cpu) = NULL; 1387 1388 return 0; 1389 } 1390 1391 static void query_values_on_cpu(void *_err) 1392 { 1393 int *err = _err; 1394 struct powernow_k8_data *data = __this_cpu_read(powernow_data); 1395 1396 *err = query_current_values_with_pending_wait(data); 1397 } 1398 1399 static unsigned int powernowk8_get(unsigned int cpu) 1400 { 1401 struct powernow_k8_data *data = per_cpu(powernow_data, cpu); 1402 unsigned int khz = 0; 1403 int err; 1404 1405 if (!data) 1406 return 0; 1407 1408 smp_call_function_single(cpu, query_values_on_cpu, &err, true); 1409 if (err) 1410 goto out; 1411 1412 if (cpu_family == CPU_HW_PSTATE) 1413 khz = find_khz_freq_from_pstate(data->powernow_table, 1414 data->currpstate); 1415 else 1416 khz = find_khz_freq_from_fid(data->currfid); 1417 1418 1419 out: 1420 return khz; 1421 } 1422 1423 static void _cpb_toggle_msrs(bool t) 1424 { 1425 int cpu; 1426 1427 get_online_cpus(); 1428 1429 rdmsr_on_cpus(cpu_online_mask, MSR_K7_HWCR, msrs); 1430 1431 for_each_cpu(cpu, cpu_online_mask) { 1432 struct msr *reg = per_cpu_ptr(msrs, cpu); 1433 if (t) 1434 reg->l &= ~BIT(25); 1435 else 1436 reg->l |= BIT(25); 1437 } 1438 wrmsr_on_cpus(cpu_online_mask, MSR_K7_HWCR, msrs); 1439 1440 put_online_cpus(); 1441 } 1442 1443 /* 1444 * Switch on/off core performance boosting. 1445 * 1446 * 0=disable 1447 * 1=enable. 1448 */ 1449 static void cpb_toggle(bool t) 1450 { 1451 if (!cpb_capable) 1452 return; 1453 1454 if (t && !cpb_enabled) { 1455 cpb_enabled = true; 1456 _cpb_toggle_msrs(t); 1457 printk(KERN_INFO PFX "Core Boosting enabled.\n"); 1458 } else if (!t && cpb_enabled) { 1459 cpb_enabled = false; 1460 _cpb_toggle_msrs(t); 1461 printk(KERN_INFO PFX "Core Boosting disabled.\n"); 1462 } 1463 } 1464 1465 static ssize_t store_cpb(struct cpufreq_policy *policy, const char *buf, 1466 size_t count) 1467 { 1468 int ret = -EINVAL; 1469 unsigned long val = 0; 1470 1471 ret = strict_strtoul(buf, 10, &val); 1472 if (!ret && (val == 0 || val == 1) && cpb_capable) 1473 cpb_toggle(val); 1474 else 1475 return -EINVAL; 1476 1477 return count; 1478 } 1479 1480 static ssize_t show_cpb(struct cpufreq_policy *policy, char *buf) 1481 { 1482 return sprintf(buf, "%u\n", cpb_enabled); 1483 } 1484 1485 #define define_one_rw(_name) \ 1486 static struct freq_attr _name = \ 1487 __ATTR(_name, 0644, show_##_name, store_##_name) 1488 1489 define_one_rw(cpb); 1490 1491 static struct freq_attr *powernow_k8_attr[] = { 1492 &cpufreq_freq_attr_scaling_available_freqs, 1493 &cpb, 1494 NULL, 1495 }; 1496 1497 static struct cpufreq_driver cpufreq_amd64_driver = { 1498 .verify = powernowk8_verify, 1499 .target = powernowk8_target, 1500 .bios_limit = acpi_processor_get_bios_limit, 1501 .init = powernowk8_cpu_init, 1502 .exit = __devexit_p(powernowk8_cpu_exit), 1503 .get = powernowk8_get, 1504 .name = "powernow-k8", 1505 .owner = THIS_MODULE, 1506 .attr = powernow_k8_attr, 1507 }; 1508 1509 /* 1510 * Clear the boost-disable flag on the CPU_DOWN path so that this cpu 1511 * cannot block the remaining ones from boosting. On the CPU_UP path we 1512 * simply keep the boost-disable flag in sync with the current global 1513 * state. 1514 */ 1515 static int cpb_notify(struct notifier_block *nb, unsigned long action, 1516 void *hcpu) 1517 { 1518 unsigned cpu = (long)hcpu; 1519 u32 lo, hi; 1520 1521 switch (action) { 1522 case CPU_UP_PREPARE: 1523 case CPU_UP_PREPARE_FROZEN: 1524 1525 if (!cpb_enabled) { 1526 rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi); 1527 lo |= BIT(25); 1528 wrmsr_on_cpu(cpu, MSR_K7_HWCR, lo, hi); 1529 } 1530 break; 1531 1532 case CPU_DOWN_PREPARE: 1533 case CPU_DOWN_PREPARE_FROZEN: 1534 rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi); 1535 lo &= ~BIT(25); 1536 wrmsr_on_cpu(cpu, MSR_K7_HWCR, lo, hi); 1537 break; 1538 1539 default: 1540 break; 1541 } 1542 1543 return NOTIFY_OK; 1544 } 1545 1546 static struct notifier_block cpb_nb = { 1547 .notifier_call = cpb_notify, 1548 }; 1549 1550 /* driver entry point for init */ 1551 static int __cpuinit powernowk8_init(void) 1552 { 1553 unsigned int i, supported_cpus = 0, cpu; 1554 int rv; 1555 1556 for_each_online_cpu(i) { 1557 int rc; 1558 smp_call_function_single(i, check_supported_cpu, &rc, 1); 1559 if (rc == 0) 1560 supported_cpus++; 1561 } 1562 1563 if (supported_cpus != num_online_cpus()) 1564 return -ENODEV; 1565 1566 printk(KERN_INFO PFX "Found %d %s (%d cpu cores) (" VERSION ")\n", 1567 num_online_nodes(), boot_cpu_data.x86_model_id, supported_cpus); 1568 1569 if (boot_cpu_has(X86_FEATURE_CPB)) { 1570 1571 cpb_capable = true; 1572 1573 msrs = msrs_alloc(); 1574 if (!msrs) { 1575 printk(KERN_ERR "%s: Error allocating msrs!\n", __func__); 1576 return -ENOMEM; 1577 } 1578 1579 register_cpu_notifier(&cpb_nb); 1580 1581 rdmsr_on_cpus(cpu_online_mask, MSR_K7_HWCR, msrs); 1582 1583 for_each_cpu(cpu, cpu_online_mask) { 1584 struct msr *reg = per_cpu_ptr(msrs, cpu); 1585 cpb_enabled |= !(!!(reg->l & BIT(25))); 1586 } 1587 1588 printk(KERN_INFO PFX "Core Performance Boosting: %s.\n", 1589 (cpb_enabled ? "on" : "off")); 1590 } 1591 1592 rv = cpufreq_register_driver(&cpufreq_amd64_driver); 1593 if (rv < 0 && boot_cpu_has(X86_FEATURE_CPB)) { 1594 unregister_cpu_notifier(&cpb_nb); 1595 msrs_free(msrs); 1596 msrs = NULL; 1597 } 1598 return rv; 1599 } 1600 1601 /* driver entry point for term */ 1602 static void __exit powernowk8_exit(void) 1603 { 1604 pr_debug("exit\n"); 1605 1606 if (boot_cpu_has(X86_FEATURE_CPB)) { 1607 msrs_free(msrs); 1608 msrs = NULL; 1609 1610 unregister_cpu_notifier(&cpb_nb); 1611 } 1612 1613 cpufreq_unregister_driver(&cpufreq_amd64_driver); 1614 } 1615 1616 MODULE_AUTHOR("Paul Devriendt <paul.devriendt@amd.com> and " 1617 "Mark Langsdorf <mark.langsdorf@amd.com>"); 1618 MODULE_DESCRIPTION("AMD Athlon 64 and Opteron processor frequency driver."); 1619 MODULE_LICENSE("GPL"); 1620 1621 late_initcall(powernowk8_init); 1622 module_exit(powernowk8_exit); 1623