1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause 3 * 4 * Copyright (c) 2005 Nate Lawson 5 * Copyright (c) 2004 Colin Percival 6 * Copyright (c) 2004-2005 Bruno Durcot 7 * Copyright (c) 2004 FUKUDA Nobuhiko 8 * Copyright (c) 2009 Michael Reifenberger 9 * Copyright (c) 2009 Norikatsu Shigemura 10 * Copyright (c) 2008-2009 Gen Otsuji 11 * 12 * This code is depending on kern_cpu.c, est.c, powernow.c, p4tcc.c, smist.c 13 * in various parts. The authors of these files are Nate Lawson, 14 * Colin Percival, Bruno Durcot, and FUKUDA Nobuhiko. 15 * This code contains patches by Michael Reifenberger and Norikatsu Shigemura. 16 * Thank you. 17 * 18 * Redistribution and use in source and binary forms, with or without 19 * modification, are permitted providing that the following conditions 20 * are met: 21 * 1. Redistributions of source code must retain the above copyright 22 * notice, this list of conditions and the following disclaimer. 23 * 2. Redistributions in binary form must reproduce the above copyright 24 * notice, this list of conditions and the following disclaimer in the 25 * documentation and/or other materials provided with the distribution. 26 * 27 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR``AS IS'' AND ANY EXPRESS OR 28 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 29 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 30 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY 31 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 32 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 33 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 34 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 35 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING 36 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 37 * POSSIBILITY OF SUCH DAMAGE. 38 */ 39 40 /* 41 * For more info: 42 * BIOS and Kernel Developer's Guide(BKDG) for AMD Family 10h Processors 43 * 31116 Rev 3.20 February 04, 2009 44 * BIOS and Kernel Developer's Guide(BKDG) for AMD Family 11h Processors 45 * 41256 Rev 3.00 - July 07, 2008 46 */ 47 48 #include <sys/param.h> 49 #include <sys/bus.h> 50 #include <sys/cpu.h> 51 #include <sys/kernel.h> 52 #include <sys/module.h> 53 #include <sys/malloc.h> 54 #include <sys/proc.h> 55 #include <sys/pcpu.h> 56 #include <sys/smp.h> 57 #include <sys/sched.h> 58 59 #include <machine/md_var.h> 60 #include <machine/cputypes.h> 61 #include <machine/specialreg.h> 62 63 #include <contrib/dev/acpica/include/acpi.h> 64 65 #include <dev/acpica/acpivar.h> 66 67 #include "acpi_if.h" 68 #include "cpufreq_if.h" 69 70 #define MSR_AMD_10H_11H_LIMIT 0xc0010061 71 #define MSR_AMD_10H_11H_CONTROL 0xc0010062 72 #define MSR_AMD_10H_11H_STATUS 0xc0010063 73 #define MSR_AMD_10H_11H_CONFIG 0xc0010064 74 75 #define AMD_10H_11H_MAX_STATES 16 76 77 /* for MSR_AMD_10H_11H_LIMIT C001_0061 */ 78 #define AMD_10H_11H_GET_PSTATE_MAX_VAL(msr) (((msr) >> 4) & 0x7) 79 #define AMD_10H_11H_GET_PSTATE_LIMIT(msr) (((msr)) & 0x7) 80 /* for MSR_AMD_10H_11H_CONFIG 10h:C001_0064:68 / 11h:C001_0064:6B */ 81 #define AMD_10H_11H_CUR_VID(msr) (((msr) >> 9) & 0x7F) 82 #define AMD_10H_11H_CUR_DID(msr) (((msr) >> 6) & 0x07) 83 #define AMD_10H_11H_CUR_FID(msr) ((msr) & 0x3F) 84 85 #define AMD_17H_CUR_IDIV(msr) (((msr) >> 30) & 0x03) 86 #define AMD_17H_CUR_IDD(msr) (((msr) >> 22) & 0xFF) 87 #define AMD_17H_CUR_VID(msr) (((msr) >> 14) & 0xFF) 88 #define AMD_17H_CUR_DID(msr) (((msr) >> 8) & 0x3F) 89 #define AMD_17H_CUR_FID(msr) ((msr) & 0xFF) 90 91 #define HWPSTATE_DEBUG(dev, msg...) \ 92 do { \ 93 if (hwpstate_verbose) \ 94 device_printf(dev, msg); \ 95 } while (0) 96 97 struct hwpstate_setting { 98 int freq; /* CPU clock in Mhz or 100ths of a percent. */ 99 int volts; /* Voltage in mV. */ 100 int power; /* Power consumed in mW. */ 101 int lat; /* Transition latency in us. */ 102 int pstate_id; /* P-State id */ 103 }; 104 105 struct hwpstate_softc { 106 device_t dev; 107 struct hwpstate_setting hwpstate_settings[AMD_10H_11H_MAX_STATES]; 108 int cfnum; 109 }; 110 111 static void hwpstate_identify(driver_t *driver, device_t parent); 112 static int hwpstate_probe(device_t dev); 113 static int hwpstate_attach(device_t dev); 114 static int hwpstate_detach(device_t dev); 115 static int hwpstate_set(device_t dev, const struct cf_setting *cf); 116 static int hwpstate_get(device_t dev, struct cf_setting *cf); 117 static int hwpstate_settings(device_t dev, struct cf_setting *sets, int *count); 118 static int hwpstate_type(device_t dev, int *type); 119 static int hwpstate_shutdown(device_t dev); 120 static int hwpstate_features(driver_t *driver, u_int *features); 121 static int hwpstate_get_info_from_acpi_perf(device_t dev, device_t perf_dev); 122 static int hwpstate_get_info_from_msr(device_t dev); 123 static int hwpstate_goto_pstate(device_t dev, int pstate_id); 124 125 static int hwpstate_verbose; 126 SYSCTL_INT(_debug, OID_AUTO, hwpstate_verbose, CTLFLAG_RWTUN, 127 &hwpstate_verbose, 0, "Debug hwpstate"); 128 129 static int hwpstate_verify; 130 SYSCTL_INT(_debug, OID_AUTO, hwpstate_verify, CTLFLAG_RWTUN, 131 &hwpstate_verify, 0, "Verify P-state after setting"); 132 133 static bool hwpstate_pstate_limit; 134 SYSCTL_BOOL(_debug, OID_AUTO, hwpstate_pstate_limit, CTLFLAG_RWTUN, 135 &hwpstate_pstate_limit, 0, 136 "If enabled (1), limit administrative control of P-states to the value in " 137 "CurPstateLimit"); 138 139 static device_method_t hwpstate_methods[] = { 140 /* Device interface */ 141 DEVMETHOD(device_identify, hwpstate_identify), 142 DEVMETHOD(device_probe, hwpstate_probe), 143 DEVMETHOD(device_attach, hwpstate_attach), 144 DEVMETHOD(device_detach, hwpstate_detach), 145 DEVMETHOD(device_shutdown, hwpstate_shutdown), 146 147 /* cpufreq interface */ 148 DEVMETHOD(cpufreq_drv_set, hwpstate_set), 149 DEVMETHOD(cpufreq_drv_get, hwpstate_get), 150 DEVMETHOD(cpufreq_drv_settings, hwpstate_settings), 151 DEVMETHOD(cpufreq_drv_type, hwpstate_type), 152 153 /* ACPI interface */ 154 DEVMETHOD(acpi_get_features, hwpstate_features), 155 {0, 0} 156 }; 157 158 static driver_t hwpstate_driver = { 159 "hwpstate", 160 hwpstate_methods, 161 sizeof(struct hwpstate_softc), 162 }; 163 164 DRIVER_MODULE(hwpstate, cpu, hwpstate_driver, 0, 0); 165 166 /* 167 * Go to Px-state on all cpus, considering the limit register (if so 168 * configured). 169 */ 170 static int 171 hwpstate_goto_pstate(device_t dev, int id) 172 { 173 sbintime_t sbt; 174 uint64_t msr; 175 int cpu, i, j, limit; 176 177 if (hwpstate_pstate_limit) { 178 /* get the current pstate limit */ 179 msr = rdmsr(MSR_AMD_10H_11H_LIMIT); 180 limit = AMD_10H_11H_GET_PSTATE_LIMIT(msr); 181 if (limit > id) { 182 HWPSTATE_DEBUG(dev, "Restricting requested P%d to P%d " 183 "due to HW limit\n", id, limit); 184 id = limit; 185 } 186 } 187 188 cpu = curcpu; 189 HWPSTATE_DEBUG(dev, "setting P%d-state on cpu%d\n", id, cpu); 190 /* Go To Px-state */ 191 wrmsr(MSR_AMD_10H_11H_CONTROL, id); 192 193 /* 194 * We are going to the same Px-state on all cpus. 195 * Probably should take _PSD into account. 196 */ 197 CPU_FOREACH(i) { 198 if (i == cpu) 199 continue; 200 201 /* Bind to each cpu. */ 202 thread_lock(curthread); 203 sched_bind(curthread, i); 204 thread_unlock(curthread); 205 HWPSTATE_DEBUG(dev, "setting P%d-state on cpu%d\n", id, i); 206 /* Go To Px-state */ 207 wrmsr(MSR_AMD_10H_11H_CONTROL, id); 208 } 209 210 /* 211 * Verify whether each core is in the requested P-state. 212 */ 213 if (hwpstate_verify) { 214 CPU_FOREACH(i) { 215 thread_lock(curthread); 216 sched_bind(curthread, i); 217 thread_unlock(curthread); 218 /* wait loop (100*100 usec is enough ?) */ 219 for (j = 0; j < 100; j++) { 220 /* get the result. not assure msr=id */ 221 msr = rdmsr(MSR_AMD_10H_11H_STATUS); 222 if (msr == id) 223 break; 224 sbt = SBT_1MS / 10; 225 tsleep_sbt(dev, PZERO, "pstate_goto", sbt, 226 sbt >> tc_precexp, 0); 227 } 228 HWPSTATE_DEBUG(dev, "result: P%d-state on cpu%d\n", 229 (int)msr, i); 230 if (msr != id) { 231 HWPSTATE_DEBUG(dev, 232 "error: loop is not enough.\n"); 233 return (ENXIO); 234 } 235 } 236 } 237 238 return (0); 239 } 240 241 static int 242 hwpstate_set(device_t dev, const struct cf_setting *cf) 243 { 244 struct hwpstate_softc *sc; 245 struct hwpstate_setting *set; 246 int i; 247 248 if (cf == NULL) 249 return (EINVAL); 250 sc = device_get_softc(dev); 251 set = sc->hwpstate_settings; 252 for (i = 0; i < sc->cfnum; i++) 253 if (CPUFREQ_CMP(cf->freq, set[i].freq)) 254 break; 255 if (i == sc->cfnum) 256 return (EINVAL); 257 258 return (hwpstate_goto_pstate(dev, set[i].pstate_id)); 259 } 260 261 static int 262 hwpstate_get(device_t dev, struct cf_setting *cf) 263 { 264 struct hwpstate_softc *sc; 265 struct hwpstate_setting set; 266 uint64_t msr; 267 268 sc = device_get_softc(dev); 269 if (cf == NULL) 270 return (EINVAL); 271 msr = rdmsr(MSR_AMD_10H_11H_STATUS); 272 if (msr >= sc->cfnum) 273 return (EINVAL); 274 set = sc->hwpstate_settings[msr]; 275 276 cf->freq = set.freq; 277 cf->volts = set.volts; 278 cf->power = set.power; 279 cf->lat = set.lat; 280 cf->dev = dev; 281 return (0); 282 } 283 284 static int 285 hwpstate_settings(device_t dev, struct cf_setting *sets, int *count) 286 { 287 struct hwpstate_softc *sc; 288 struct hwpstate_setting set; 289 int i; 290 291 if (sets == NULL || count == NULL) 292 return (EINVAL); 293 sc = device_get_softc(dev); 294 if (*count < sc->cfnum) 295 return (E2BIG); 296 for (i = 0; i < sc->cfnum; i++, sets++) { 297 set = sc->hwpstate_settings[i]; 298 sets->freq = set.freq; 299 sets->volts = set.volts; 300 sets->power = set.power; 301 sets->lat = set.lat; 302 sets->dev = dev; 303 } 304 *count = sc->cfnum; 305 306 return (0); 307 } 308 309 static int 310 hwpstate_type(device_t dev, int *type) 311 { 312 313 if (type == NULL) 314 return (EINVAL); 315 316 *type = CPUFREQ_TYPE_ABSOLUTE; 317 return (0); 318 } 319 320 static void 321 hwpstate_identify(driver_t *driver, device_t parent) 322 { 323 324 if (device_find_child(parent, "hwpstate", -1) != NULL) 325 return; 326 327 if ((cpu_vendor_id != CPU_VENDOR_AMD || CPUID_TO_FAMILY(cpu_id) < 0x10) && 328 cpu_vendor_id != CPU_VENDOR_HYGON) 329 return; 330 331 /* 332 * Check if hardware pstate enable bit is set. 333 */ 334 if ((amd_pminfo & AMDPM_HW_PSTATE) == 0) { 335 HWPSTATE_DEBUG(parent, "hwpstate enable bit is not set.\n"); 336 return; 337 } 338 339 if (resource_disabled("hwpstate", 0)) 340 return; 341 342 if (BUS_ADD_CHILD(parent, 10, "hwpstate", device_get_unit(parent)) 343 == NULL) 344 device_printf(parent, "hwpstate: add child failed\n"); 345 } 346 347 static int 348 hwpstate_probe(device_t dev) 349 { 350 struct hwpstate_softc *sc; 351 device_t perf_dev; 352 uint64_t msr; 353 int error, type; 354 355 /* 356 * Only hwpstate0. 357 * It goes well with acpi_throttle. 358 */ 359 if (device_get_unit(dev) != 0) 360 return (ENXIO); 361 362 sc = device_get_softc(dev); 363 sc->dev = dev; 364 365 /* 366 * Check if acpi_perf has INFO only flag. 367 */ 368 perf_dev = device_find_child(device_get_parent(dev), "acpi_perf", -1); 369 error = TRUE; 370 if (perf_dev && device_is_attached(perf_dev)) { 371 error = CPUFREQ_DRV_TYPE(perf_dev, &type); 372 if (error == 0) { 373 if ((type & CPUFREQ_FLAG_INFO_ONLY) == 0) { 374 /* 375 * If acpi_perf doesn't have INFO_ONLY flag, 376 * it will take care of pstate transitions. 377 */ 378 HWPSTATE_DEBUG(dev, "acpi_perf will take care of pstate transitions.\n"); 379 return (ENXIO); 380 } else { 381 /* 382 * If acpi_perf has INFO_ONLY flag, (_PCT has FFixedHW) 383 * we can get _PSS info from acpi_perf 384 * without going into ACPI. 385 */ 386 HWPSTATE_DEBUG(dev, "going to fetch info from acpi_perf\n"); 387 error = hwpstate_get_info_from_acpi_perf(dev, perf_dev); 388 } 389 } 390 } 391 392 if (error == 0) { 393 /* 394 * Now we get _PSS info from acpi_perf without error. 395 * Let's check it. 396 */ 397 msr = rdmsr(MSR_AMD_10H_11H_LIMIT); 398 if (sc->cfnum != 1 + AMD_10H_11H_GET_PSTATE_MAX_VAL(msr)) { 399 HWPSTATE_DEBUG(dev, "MSR (%jd) and ACPI _PSS (%d)" 400 " count mismatch\n", (intmax_t)msr, sc->cfnum); 401 error = TRUE; 402 } 403 } 404 405 /* 406 * If we cannot get info from acpi_perf, 407 * Let's get info from MSRs. 408 */ 409 if (error) 410 error = hwpstate_get_info_from_msr(dev); 411 if (error) 412 return (error); 413 414 device_set_desc(dev, "Cool`n'Quiet 2.0"); 415 return (0); 416 } 417 418 static int 419 hwpstate_attach(device_t dev) 420 { 421 422 return (cpufreq_register(dev)); 423 } 424 425 static int 426 hwpstate_get_info_from_msr(device_t dev) 427 { 428 struct hwpstate_softc *sc; 429 struct hwpstate_setting *hwpstate_set; 430 uint64_t msr; 431 int family, i, fid, did; 432 433 family = CPUID_TO_FAMILY(cpu_id); 434 sc = device_get_softc(dev); 435 /* Get pstate count */ 436 msr = rdmsr(MSR_AMD_10H_11H_LIMIT); 437 sc->cfnum = 1 + AMD_10H_11H_GET_PSTATE_MAX_VAL(msr); 438 hwpstate_set = sc->hwpstate_settings; 439 for (i = 0; i < sc->cfnum; i++) { 440 msr = rdmsr(MSR_AMD_10H_11H_CONFIG + i); 441 if ((msr & ((uint64_t)1 << 63)) == 0) { 442 HWPSTATE_DEBUG(dev, "msr is not valid.\n"); 443 return (ENXIO); 444 } 445 did = AMD_10H_11H_CUR_DID(msr); 446 fid = AMD_10H_11H_CUR_FID(msr); 447 448 hwpstate_set[i].volts = CPUFREQ_VAL_UNKNOWN; 449 hwpstate_set[i].power = CPUFREQ_VAL_UNKNOWN; 450 hwpstate_set[i].lat = CPUFREQ_VAL_UNKNOWN; 451 /* Convert fid/did to frequency. */ 452 switch (family) { 453 case 0x11: 454 hwpstate_set[i].freq = (100 * (fid + 0x08)) >> did; 455 break; 456 case 0x10: 457 case 0x12: 458 case 0x15: 459 case 0x16: 460 hwpstate_set[i].freq = (100 * (fid + 0x10)) >> did; 461 break; 462 case 0x17: 463 case 0x18: 464 did = AMD_17H_CUR_DID(msr); 465 if (did == 0) { 466 HWPSTATE_DEBUG(dev, "unexpected did: 0\n"); 467 did = 1; 468 } 469 fid = AMD_17H_CUR_FID(msr); 470 hwpstate_set[i].freq = (200 * fid) / did; 471 /* Vid step is 6.25mV, so scale by 100. */ 472 hwpstate_set[i].volts = 473 (155000 - (625 * AMD_17H_CUR_VID(msr))) / 100; 474 /* 475 * Calculate current first. 476 * This equation is mentioned in 477 * "BKDG for AMD Family 15h Models 70h-7fh Processors", 478 * section 2.5.2.1.6. 479 */ 480 hwpstate_set[i].power = AMD_17H_CUR_IDD(msr) * 1000; 481 switch (AMD_17H_CUR_IDIV(msr)) { 482 case 3: /* divide by 1000 */ 483 hwpstate_set[i].power /= 10; 484 case 2: /* divide by 100 */ 485 hwpstate_set[i].power /= 10; 486 case 1: /* divide by 10 */ 487 hwpstate_set[i].power /= 10; 488 case 0: /* divide by 1 */ 489 ; 490 } 491 hwpstate_set[i].power *= hwpstate_set[i].volts; 492 /* Milli amps * milli volts to milli watts. */ 493 hwpstate_set[i].power /= 1000; 494 break; 495 default: 496 HWPSTATE_DEBUG(dev, "get_info_from_msr: %s family" 497 " 0x%02x CPUs are not supported yet\n", 498 cpu_vendor_id == CPU_VENDOR_HYGON ? "Hygon" : "AMD", 499 family); 500 return (ENXIO); 501 } 502 hwpstate_set[i].pstate_id = i; 503 } 504 return (0); 505 } 506 507 static int 508 hwpstate_get_info_from_acpi_perf(device_t dev, device_t perf_dev) 509 { 510 struct hwpstate_softc *sc; 511 struct cf_setting *perf_set; 512 struct hwpstate_setting *hwpstate_set; 513 int count, error, i; 514 515 perf_set = malloc(MAX_SETTINGS * sizeof(*perf_set), M_TEMP, M_NOWAIT); 516 if (perf_set == NULL) { 517 HWPSTATE_DEBUG(dev, "nomem\n"); 518 return (ENOMEM); 519 } 520 /* 521 * Fetch settings from acpi_perf. 522 * Now it is attached, and has info only flag. 523 */ 524 count = MAX_SETTINGS; 525 error = CPUFREQ_DRV_SETTINGS(perf_dev, perf_set, &count); 526 if (error) { 527 HWPSTATE_DEBUG(dev, "error: CPUFREQ_DRV_SETTINGS.\n"); 528 goto out; 529 } 530 sc = device_get_softc(dev); 531 sc->cfnum = count; 532 hwpstate_set = sc->hwpstate_settings; 533 for (i = 0; i < count; i++) { 534 if (i == perf_set[i].spec[0]) { 535 hwpstate_set[i].pstate_id = i; 536 hwpstate_set[i].freq = perf_set[i].freq; 537 hwpstate_set[i].volts = perf_set[i].volts; 538 hwpstate_set[i].power = perf_set[i].power; 539 hwpstate_set[i].lat = perf_set[i].lat; 540 } else { 541 HWPSTATE_DEBUG(dev, "ACPI _PSS object mismatch.\n"); 542 error = ENXIO; 543 goto out; 544 } 545 } 546 out: 547 if (perf_set) 548 free(perf_set, M_TEMP); 549 return (error); 550 } 551 552 static int 553 hwpstate_detach(device_t dev) 554 { 555 556 hwpstate_goto_pstate(dev, 0); 557 return (cpufreq_unregister(dev)); 558 } 559 560 static int 561 hwpstate_shutdown(device_t dev) 562 { 563 564 /* hwpstate_goto_pstate(dev, 0); */ 565 return (0); 566 } 567 568 static int 569 hwpstate_features(driver_t *driver, u_int *features) 570 { 571 572 /* Notify the ACPI CPU that we support direct access to MSRs */ 573 *features = ACPI_CAP_PERF_MSRS; 574 return (0); 575 } 576