1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause 3 * 4 * Copyright (c) 2005 Nate Lawson 5 * Copyright (c) 2004 Colin Percival 6 * Copyright (c) 2004-2005 Bruno Durcot 7 * Copyright (c) 2004 FUKUDA Nobuhiko 8 * Copyright (c) 2009 Michael Reifenberger 9 * Copyright (c) 2009 Norikatsu Shigemura 10 * Copyright (c) 2008-2009 Gen Otsuji 11 * 12 * This code is depending on kern_cpu.c, est.c, powernow.c, p4tcc.c, smist.c 13 * in various parts. The authors of these files are Nate Lawson, 14 * Colin Percival, Bruno Durcot, and FUKUDA Nobuhiko. 15 * This code contains patches by Michael Reifenberger and Norikatsu Shigemura. 16 * Thank you. 17 * 18 * Redistribution and use in source and binary forms, with or without 19 * modification, are permitted providing that the following conditions 20 * are met: 21 * 1. Redistributions of source code must retain the above copyright 22 * notice, this list of conditions and the following disclaimer. 23 * 2. Redistributions in binary form must reproduce the above copyright 24 * notice, this list of conditions and the following disclaimer in the 25 * documentation and/or other materials provided with the distribution. 26 * 27 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR``AS IS'' AND ANY EXPRESS OR 28 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 29 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 30 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY 31 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 32 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 33 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 34 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 35 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING 36 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 37 * POSSIBILITY OF SUCH DAMAGE. 38 */ 39 40 /* 41 * For more info: 42 * BIOS and Kernel Developer's Guide(BKDG) for AMD Family 10h Processors 43 * 31116 Rev 3.20 February 04, 2009 44 * BIOS and Kernel Developer's Guide(BKDG) for AMD Family 11h Processors 45 * 41256 Rev 3.00 - July 07, 2008 46 */ 47 48 #include <sys/cdefs.h> 49 __FBSDID("$FreeBSD$"); 50 51 #include <sys/param.h> 52 #include <sys/bus.h> 53 #include <sys/cpu.h> 54 #include <sys/kernel.h> 55 #include <sys/module.h> 56 #include <sys/malloc.h> 57 #include <sys/proc.h> 58 #include <sys/pcpu.h> 59 #include <sys/smp.h> 60 #include <sys/sched.h> 61 62 #include <machine/md_var.h> 63 #include <machine/cputypes.h> 64 #include <machine/specialreg.h> 65 66 #include <contrib/dev/acpica/include/acpi.h> 67 68 #include <dev/acpica/acpivar.h> 69 70 #include "acpi_if.h" 71 #include "cpufreq_if.h" 72 73 #define MSR_AMD_10H_11H_LIMIT 0xc0010061 74 #define MSR_AMD_10H_11H_CONTROL 0xc0010062 75 #define MSR_AMD_10H_11H_STATUS 0xc0010063 76 #define MSR_AMD_10H_11H_CONFIG 0xc0010064 77 78 #define AMD_10H_11H_MAX_STATES 16 79 80 /* for MSR_AMD_10H_11H_LIMIT C001_0061 */ 81 #define AMD_10H_11H_GET_PSTATE_MAX_VAL(msr) (((msr) >> 4) & 0x7) 82 #define AMD_10H_11H_GET_PSTATE_LIMIT(msr) (((msr)) & 0x7) 83 /* for MSR_AMD_10H_11H_CONFIG 10h:C001_0064:68 / 11h:C001_0064:6B */ 84 #define AMD_10H_11H_CUR_VID(msr) (((msr) >> 9) & 0x7F) 85 #define AMD_10H_11H_CUR_DID(msr) (((msr) >> 6) & 0x07) 86 #define AMD_10H_11H_CUR_FID(msr) ((msr) & 0x3F) 87 88 #define AMD_17H_CUR_IDIV(msr) (((msr) >> 30) & 0x03) 89 #define AMD_17H_CUR_IDD(msr) (((msr) >> 22) & 0xFF) 90 #define AMD_17H_CUR_VID(msr) (((msr) >> 14) & 0xFF) 91 #define AMD_17H_CUR_DID(msr) (((msr) >> 8) & 0x3F) 92 #define AMD_17H_CUR_FID(msr) ((msr) & 0xFF) 93 94 #define HWPSTATE_DEBUG(dev, msg...) \ 95 do { \ 96 if (hwpstate_verbose) \ 97 device_printf(dev, msg); \ 98 } while (0) 99 100 struct hwpstate_setting { 101 int freq; /* CPU clock in Mhz or 100ths of a percent. */ 102 int volts; /* Voltage in mV. */ 103 int power; /* Power consumed in mW. */ 104 int lat; /* Transition latency in us. */ 105 int pstate_id; /* P-State id */ 106 }; 107 108 struct hwpstate_softc { 109 device_t dev; 110 struct hwpstate_setting hwpstate_settings[AMD_10H_11H_MAX_STATES]; 111 int cfnum; 112 }; 113 114 static void hwpstate_identify(driver_t *driver, device_t parent); 115 static int hwpstate_probe(device_t dev); 116 static int hwpstate_attach(device_t dev); 117 static int hwpstate_detach(device_t dev); 118 static int hwpstate_set(device_t dev, const struct cf_setting *cf); 119 static int hwpstate_get(device_t dev, struct cf_setting *cf); 120 static int hwpstate_settings(device_t dev, struct cf_setting *sets, int *count); 121 static int hwpstate_type(device_t dev, int *type); 122 static int hwpstate_shutdown(device_t dev); 123 static int hwpstate_features(driver_t *driver, u_int *features); 124 static int hwpstate_get_info_from_acpi_perf(device_t dev, device_t perf_dev); 125 static int hwpstate_get_info_from_msr(device_t dev); 126 static int hwpstate_goto_pstate(device_t dev, int pstate_id); 127 128 static int hwpstate_verbose; 129 SYSCTL_INT(_debug, OID_AUTO, hwpstate_verbose, CTLFLAG_RWTUN, 130 &hwpstate_verbose, 0, "Debug hwpstate"); 131 132 static int hwpstate_verify; 133 SYSCTL_INT(_debug, OID_AUTO, hwpstate_verify, CTLFLAG_RWTUN, 134 &hwpstate_verify, 0, "Verify P-state after setting"); 135 136 static bool hwpstate_pstate_limit; 137 SYSCTL_BOOL(_debug, OID_AUTO, hwpstate_pstate_limit, CTLFLAG_RWTUN, 138 &hwpstate_pstate_limit, 0, 139 "If enabled (1), limit administrative control of P-states to the value in " 140 "CurPstateLimit"); 141 142 static device_method_t hwpstate_methods[] = { 143 /* Device interface */ 144 DEVMETHOD(device_identify, hwpstate_identify), 145 DEVMETHOD(device_probe, hwpstate_probe), 146 DEVMETHOD(device_attach, hwpstate_attach), 147 DEVMETHOD(device_detach, hwpstate_detach), 148 DEVMETHOD(device_shutdown, hwpstate_shutdown), 149 150 /* cpufreq interface */ 151 DEVMETHOD(cpufreq_drv_set, hwpstate_set), 152 DEVMETHOD(cpufreq_drv_get, hwpstate_get), 153 DEVMETHOD(cpufreq_drv_settings, hwpstate_settings), 154 DEVMETHOD(cpufreq_drv_type, hwpstate_type), 155 156 /* ACPI interface */ 157 DEVMETHOD(acpi_get_features, hwpstate_features), 158 {0, 0} 159 }; 160 161 static driver_t hwpstate_driver = { 162 "hwpstate", 163 hwpstate_methods, 164 sizeof(struct hwpstate_softc), 165 }; 166 167 DRIVER_MODULE(hwpstate, cpu, hwpstate_driver, 0, 0); 168 169 /* 170 * Go to Px-state on all cpus, considering the limit register (if so 171 * configured). 172 */ 173 static int 174 hwpstate_goto_pstate(device_t dev, int id) 175 { 176 sbintime_t sbt; 177 uint64_t msr; 178 int cpu, i, j, limit; 179 180 if (hwpstate_pstate_limit) { 181 /* get the current pstate limit */ 182 msr = rdmsr(MSR_AMD_10H_11H_LIMIT); 183 limit = AMD_10H_11H_GET_PSTATE_LIMIT(msr); 184 if (limit > id) { 185 HWPSTATE_DEBUG(dev, "Restricting requested P%d to P%d " 186 "due to HW limit\n", id, limit); 187 id = limit; 188 } 189 } 190 191 cpu = curcpu; 192 HWPSTATE_DEBUG(dev, "setting P%d-state on cpu%d\n", id, cpu); 193 /* Go To Px-state */ 194 wrmsr(MSR_AMD_10H_11H_CONTROL, id); 195 196 /* 197 * We are going to the same Px-state on all cpus. 198 * Probably should take _PSD into account. 199 */ 200 CPU_FOREACH(i) { 201 if (i == cpu) 202 continue; 203 204 /* Bind to each cpu. */ 205 thread_lock(curthread); 206 sched_bind(curthread, i); 207 thread_unlock(curthread); 208 HWPSTATE_DEBUG(dev, "setting P%d-state on cpu%d\n", id, i); 209 /* Go To Px-state */ 210 wrmsr(MSR_AMD_10H_11H_CONTROL, id); 211 } 212 213 /* 214 * Verify whether each core is in the requested P-state. 215 */ 216 if (hwpstate_verify) { 217 CPU_FOREACH(i) { 218 thread_lock(curthread); 219 sched_bind(curthread, i); 220 thread_unlock(curthread); 221 /* wait loop (100*100 usec is enough ?) */ 222 for (j = 0; j < 100; j++) { 223 /* get the result. not assure msr=id */ 224 msr = rdmsr(MSR_AMD_10H_11H_STATUS); 225 if (msr == id) 226 break; 227 sbt = SBT_1MS / 10; 228 tsleep_sbt(dev, PZERO, "pstate_goto", sbt, 229 sbt >> tc_precexp, 0); 230 } 231 HWPSTATE_DEBUG(dev, "result: P%d-state on cpu%d\n", 232 (int)msr, i); 233 if (msr != id) { 234 HWPSTATE_DEBUG(dev, 235 "error: loop is not enough.\n"); 236 return (ENXIO); 237 } 238 } 239 } 240 241 return (0); 242 } 243 244 static int 245 hwpstate_set(device_t dev, const struct cf_setting *cf) 246 { 247 struct hwpstate_softc *sc; 248 struct hwpstate_setting *set; 249 int i; 250 251 if (cf == NULL) 252 return (EINVAL); 253 sc = device_get_softc(dev); 254 set = sc->hwpstate_settings; 255 for (i = 0; i < sc->cfnum; i++) 256 if (CPUFREQ_CMP(cf->freq, set[i].freq)) 257 break; 258 if (i == sc->cfnum) 259 return (EINVAL); 260 261 return (hwpstate_goto_pstate(dev, set[i].pstate_id)); 262 } 263 264 static int 265 hwpstate_get(device_t dev, struct cf_setting *cf) 266 { 267 struct hwpstate_softc *sc; 268 struct hwpstate_setting set; 269 uint64_t msr; 270 271 sc = device_get_softc(dev); 272 if (cf == NULL) 273 return (EINVAL); 274 msr = rdmsr(MSR_AMD_10H_11H_STATUS); 275 if (msr >= sc->cfnum) 276 return (EINVAL); 277 set = sc->hwpstate_settings[msr]; 278 279 cf->freq = set.freq; 280 cf->volts = set.volts; 281 cf->power = set.power; 282 cf->lat = set.lat; 283 cf->dev = dev; 284 return (0); 285 } 286 287 static int 288 hwpstate_settings(device_t dev, struct cf_setting *sets, int *count) 289 { 290 struct hwpstate_softc *sc; 291 struct hwpstate_setting set; 292 int i; 293 294 if (sets == NULL || count == NULL) 295 return (EINVAL); 296 sc = device_get_softc(dev); 297 if (*count < sc->cfnum) 298 return (E2BIG); 299 for (i = 0; i < sc->cfnum; i++, sets++) { 300 set = sc->hwpstate_settings[i]; 301 sets->freq = set.freq; 302 sets->volts = set.volts; 303 sets->power = set.power; 304 sets->lat = set.lat; 305 sets->dev = dev; 306 } 307 *count = sc->cfnum; 308 309 return (0); 310 } 311 312 static int 313 hwpstate_type(device_t dev, int *type) 314 { 315 316 if (type == NULL) 317 return (EINVAL); 318 319 *type = CPUFREQ_TYPE_ABSOLUTE; 320 return (0); 321 } 322 323 static void 324 hwpstate_identify(driver_t *driver, device_t parent) 325 { 326 327 if (device_find_child(parent, "hwpstate", -1) != NULL) 328 return; 329 330 if ((cpu_vendor_id != CPU_VENDOR_AMD || CPUID_TO_FAMILY(cpu_id) < 0x10) && 331 cpu_vendor_id != CPU_VENDOR_HYGON) 332 return; 333 334 /* 335 * Check if hardware pstate enable bit is set. 336 */ 337 if ((amd_pminfo & AMDPM_HW_PSTATE) == 0) { 338 HWPSTATE_DEBUG(parent, "hwpstate enable bit is not set.\n"); 339 return; 340 } 341 342 if (resource_disabled("hwpstate", 0)) 343 return; 344 345 if (BUS_ADD_CHILD(parent, 10, "hwpstate", device_get_unit(parent)) 346 == NULL) 347 device_printf(parent, "hwpstate: add child failed\n"); 348 } 349 350 static int 351 hwpstate_probe(device_t dev) 352 { 353 struct hwpstate_softc *sc; 354 device_t perf_dev; 355 uint64_t msr; 356 int error, type; 357 358 /* 359 * Only hwpstate0. 360 * It goes well with acpi_throttle. 361 */ 362 if (device_get_unit(dev) != 0) 363 return (ENXIO); 364 365 sc = device_get_softc(dev); 366 sc->dev = dev; 367 368 /* 369 * Check if acpi_perf has INFO only flag. 370 */ 371 perf_dev = device_find_child(device_get_parent(dev), "acpi_perf", -1); 372 error = TRUE; 373 if (perf_dev && device_is_attached(perf_dev)) { 374 error = CPUFREQ_DRV_TYPE(perf_dev, &type); 375 if (error == 0) { 376 if ((type & CPUFREQ_FLAG_INFO_ONLY) == 0) { 377 /* 378 * If acpi_perf doesn't have INFO_ONLY flag, 379 * it will take care of pstate transitions. 380 */ 381 HWPSTATE_DEBUG(dev, "acpi_perf will take care of pstate transitions.\n"); 382 return (ENXIO); 383 } else { 384 /* 385 * If acpi_perf has INFO_ONLY flag, (_PCT has FFixedHW) 386 * we can get _PSS info from acpi_perf 387 * without going into ACPI. 388 */ 389 HWPSTATE_DEBUG(dev, "going to fetch info from acpi_perf\n"); 390 error = hwpstate_get_info_from_acpi_perf(dev, perf_dev); 391 } 392 } 393 } 394 395 if (error == 0) { 396 /* 397 * Now we get _PSS info from acpi_perf without error. 398 * Let's check it. 399 */ 400 msr = rdmsr(MSR_AMD_10H_11H_LIMIT); 401 if (sc->cfnum != 1 + AMD_10H_11H_GET_PSTATE_MAX_VAL(msr)) { 402 HWPSTATE_DEBUG(dev, "MSR (%jd) and ACPI _PSS (%d)" 403 " count mismatch\n", (intmax_t)msr, sc->cfnum); 404 error = TRUE; 405 } 406 } 407 408 /* 409 * If we cannot get info from acpi_perf, 410 * Let's get info from MSRs. 411 */ 412 if (error) 413 error = hwpstate_get_info_from_msr(dev); 414 if (error) 415 return (error); 416 417 device_set_desc(dev, "Cool`n'Quiet 2.0"); 418 return (0); 419 } 420 421 static int 422 hwpstate_attach(device_t dev) 423 { 424 425 return (cpufreq_register(dev)); 426 } 427 428 static int 429 hwpstate_get_info_from_msr(device_t dev) 430 { 431 struct hwpstate_softc *sc; 432 struct hwpstate_setting *hwpstate_set; 433 uint64_t msr; 434 int family, i, fid, did; 435 436 family = CPUID_TO_FAMILY(cpu_id); 437 sc = device_get_softc(dev); 438 /* Get pstate count */ 439 msr = rdmsr(MSR_AMD_10H_11H_LIMIT); 440 sc->cfnum = 1 + AMD_10H_11H_GET_PSTATE_MAX_VAL(msr); 441 hwpstate_set = sc->hwpstate_settings; 442 for (i = 0; i < sc->cfnum; i++) { 443 msr = rdmsr(MSR_AMD_10H_11H_CONFIG + i); 444 if ((msr & ((uint64_t)1 << 63)) == 0) { 445 HWPSTATE_DEBUG(dev, "msr is not valid.\n"); 446 return (ENXIO); 447 } 448 did = AMD_10H_11H_CUR_DID(msr); 449 fid = AMD_10H_11H_CUR_FID(msr); 450 451 hwpstate_set[i].volts = CPUFREQ_VAL_UNKNOWN; 452 hwpstate_set[i].power = CPUFREQ_VAL_UNKNOWN; 453 hwpstate_set[i].lat = CPUFREQ_VAL_UNKNOWN; 454 /* Convert fid/did to frequency. */ 455 switch (family) { 456 case 0x11: 457 hwpstate_set[i].freq = (100 * (fid + 0x08)) >> did; 458 break; 459 case 0x10: 460 case 0x12: 461 case 0x15: 462 case 0x16: 463 hwpstate_set[i].freq = (100 * (fid + 0x10)) >> did; 464 break; 465 case 0x17: 466 case 0x18: 467 did = AMD_17H_CUR_DID(msr); 468 if (did == 0) { 469 HWPSTATE_DEBUG(dev, "unexpected did: 0\n"); 470 did = 1; 471 } 472 fid = AMD_17H_CUR_FID(msr); 473 hwpstate_set[i].freq = (200 * fid) / did; 474 /* Vid step is 6.25mV, so scale by 100. */ 475 hwpstate_set[i].volts = 476 (155000 - (625 * AMD_17H_CUR_VID(msr))) / 100; 477 /* 478 * Calculate current first. 479 * This equation is mentioned in 480 * "BKDG for AMD Family 15h Models 70h-7fh Processors", 481 * section 2.5.2.1.6. 482 */ 483 hwpstate_set[i].power = AMD_17H_CUR_IDD(msr) * 1000; 484 switch (AMD_17H_CUR_IDIV(msr)) { 485 case 3: /* divide by 1000 */ 486 hwpstate_set[i].power /= 10; 487 case 2: /* divide by 100 */ 488 hwpstate_set[i].power /= 10; 489 case 1: /* divide by 10 */ 490 hwpstate_set[i].power /= 10; 491 case 0: /* divide by 1 */ 492 ; 493 } 494 hwpstate_set[i].power *= hwpstate_set[i].volts; 495 /* Milli amps * milli volts to milli watts. */ 496 hwpstate_set[i].power /= 1000; 497 break; 498 default: 499 HWPSTATE_DEBUG(dev, "get_info_from_msr: %s family" 500 " 0x%02x CPUs are not supported yet\n", 501 cpu_vendor_id == CPU_VENDOR_HYGON ? "Hygon" : "AMD", 502 family); 503 return (ENXIO); 504 } 505 hwpstate_set[i].pstate_id = i; 506 } 507 return (0); 508 } 509 510 static int 511 hwpstate_get_info_from_acpi_perf(device_t dev, device_t perf_dev) 512 { 513 struct hwpstate_softc *sc; 514 struct cf_setting *perf_set; 515 struct hwpstate_setting *hwpstate_set; 516 int count, error, i; 517 518 perf_set = malloc(MAX_SETTINGS * sizeof(*perf_set), M_TEMP, M_NOWAIT); 519 if (perf_set == NULL) { 520 HWPSTATE_DEBUG(dev, "nomem\n"); 521 return (ENOMEM); 522 } 523 /* 524 * Fetch settings from acpi_perf. 525 * Now it is attached, and has info only flag. 526 */ 527 count = MAX_SETTINGS; 528 error = CPUFREQ_DRV_SETTINGS(perf_dev, perf_set, &count); 529 if (error) { 530 HWPSTATE_DEBUG(dev, "error: CPUFREQ_DRV_SETTINGS.\n"); 531 goto out; 532 } 533 sc = device_get_softc(dev); 534 sc->cfnum = count; 535 hwpstate_set = sc->hwpstate_settings; 536 for (i = 0; i < count; i++) { 537 if (i == perf_set[i].spec[0]) { 538 hwpstate_set[i].pstate_id = i; 539 hwpstate_set[i].freq = perf_set[i].freq; 540 hwpstate_set[i].volts = perf_set[i].volts; 541 hwpstate_set[i].power = perf_set[i].power; 542 hwpstate_set[i].lat = perf_set[i].lat; 543 } else { 544 HWPSTATE_DEBUG(dev, "ACPI _PSS object mismatch.\n"); 545 error = ENXIO; 546 goto out; 547 } 548 } 549 out: 550 if (perf_set) 551 free(perf_set, M_TEMP); 552 return (error); 553 } 554 555 static int 556 hwpstate_detach(device_t dev) 557 { 558 559 hwpstate_goto_pstate(dev, 0); 560 return (cpufreq_unregister(dev)); 561 } 562 563 static int 564 hwpstate_shutdown(device_t dev) 565 { 566 567 /* hwpstate_goto_pstate(dev, 0); */ 568 return (0); 569 } 570 571 static int 572 hwpstate_features(driver_t *driver, u_int *features) 573 { 574 575 /* Notify the ACPI CPU that we support direct access to MSRs */ 576 *features = ACPI_CAP_PERF_MSRS; 577 return (0); 578 } 579