1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause 3 * 4 * Copyright (c) 2005 Nate Lawson 5 * Copyright (c) 2004 Colin Percival 6 * Copyright (c) 2004-2005 Bruno Durcot 7 * Copyright (c) 2004 FUKUDA Nobuhiko 8 * Copyright (c) 2009 Michael Reifenberger 9 * Copyright (c) 2009 Norikatsu Shigemura 10 * Copyright (c) 2008-2009 Gen Otsuji 11 * 12 * This code is depending on kern_cpu.c, est.c, powernow.c, p4tcc.c, smist.c 13 * in various parts. The authors of these files are Nate Lawson, 14 * Colin Percival, Bruno Durcot, and FUKUDA Nobuhiko. 15 * This code contains patches by Michael Reifenberger and Norikatsu Shigemura. 16 * Thank you. 17 * 18 * Redistribution and use in source and binary forms, with or without 19 * modification, are permitted providing that the following conditions 20 * are met: 21 * 1. Redistributions of source code must retain the above copyright 22 * notice, this list of conditions and the following disclaimer. 23 * 2. Redistributions in binary form must reproduce the above copyright 24 * notice, this list of conditions and the following disclaimer in the 25 * documentation and/or other materials provided with the distribution. 26 * 27 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR``AS IS'' AND ANY EXPRESS OR 28 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 29 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 30 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY 31 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 32 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 33 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 34 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 35 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING 36 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 37 * POSSIBILITY OF SUCH DAMAGE. 
38 */ 39 40 /* 41 * For more info: 42 * BIOS and Kernel Developer's Guide(BKDG) for AMD Family 10h Processors 43 * 31116 Rev 3.20 February 04, 2009 44 * BIOS and Kernel Developer's Guide(BKDG) for AMD Family 11h Processors 45 * 41256 Rev 3.00 - July 07, 2008 46 */ 47 48 #include <sys/cdefs.h> 49 #include <sys/param.h> 50 #include <sys/bus.h> 51 #include <sys/cpu.h> 52 #include <sys/kernel.h> 53 #include <sys/module.h> 54 #include <sys/malloc.h> 55 #include <sys/proc.h> 56 #include <sys/pcpu.h> 57 #include <sys/smp.h> 58 #include <sys/sched.h> 59 60 #include <machine/md_var.h> 61 #include <machine/cputypes.h> 62 #include <machine/specialreg.h> 63 64 #include <contrib/dev/acpica/include/acpi.h> 65 66 #include <dev/acpica/acpivar.h> 67 68 #include "acpi_if.h" 69 #include "cpufreq_if.h" 70 71 #define MSR_AMD_10H_11H_LIMIT 0xc0010061 72 #define MSR_AMD_10H_11H_CONTROL 0xc0010062 73 #define MSR_AMD_10H_11H_STATUS 0xc0010063 74 #define MSR_AMD_10H_11H_CONFIG 0xc0010064 75 76 #define AMD_10H_11H_MAX_STATES 16 77 78 /* for MSR_AMD_10H_11H_LIMIT C001_0061 */ 79 #define AMD_10H_11H_GET_PSTATE_MAX_VAL(msr) (((msr) >> 4) & 0x7) 80 #define AMD_10H_11H_GET_PSTATE_LIMIT(msr) (((msr)) & 0x7) 81 /* for MSR_AMD_10H_11H_CONFIG 10h:C001_0064:68 / 11h:C001_0064:6B */ 82 #define AMD_10H_11H_CUR_VID(msr) (((msr) >> 9) & 0x7F) 83 #define AMD_10H_11H_CUR_DID(msr) (((msr) >> 6) & 0x07) 84 #define AMD_10H_11H_CUR_FID(msr) ((msr) & 0x3F) 85 86 #define AMD_17H_CUR_IDIV(msr) (((msr) >> 30) & 0x03) 87 #define AMD_17H_CUR_IDD(msr) (((msr) >> 22) & 0xFF) 88 #define AMD_17H_CUR_VID(msr) (((msr) >> 14) & 0xFF) 89 #define AMD_17H_CUR_DID(msr) (((msr) >> 8) & 0x3F) 90 #define AMD_17H_CUR_FID(msr) ((msr) & 0xFF) 91 92 #define HWPSTATE_DEBUG(dev, msg...) \ 93 do { \ 94 if (hwpstate_verbose) \ 95 device_printf(dev, msg); \ 96 } while (0) 97 98 struct hwpstate_setting { 99 int freq; /* CPU clock in Mhz or 100ths of a percent. */ 100 int volts; /* Voltage in mV. */ 101 int power; /* Power consumed in mW. 
*/ 102 int lat; /* Transition latency in us. */ 103 int pstate_id; /* P-State id */ 104 }; 105 106 struct hwpstate_softc { 107 device_t dev; 108 struct hwpstate_setting hwpstate_settings[AMD_10H_11H_MAX_STATES]; 109 int cfnum; 110 }; 111 112 static void hwpstate_identify(driver_t *driver, device_t parent); 113 static int hwpstate_probe(device_t dev); 114 static int hwpstate_attach(device_t dev); 115 static int hwpstate_detach(device_t dev); 116 static int hwpstate_set(device_t dev, const struct cf_setting *cf); 117 static int hwpstate_get(device_t dev, struct cf_setting *cf); 118 static int hwpstate_settings(device_t dev, struct cf_setting *sets, int *count); 119 static int hwpstate_type(device_t dev, int *type); 120 static int hwpstate_shutdown(device_t dev); 121 static int hwpstate_features(driver_t *driver, u_int *features); 122 static int hwpstate_get_info_from_acpi_perf(device_t dev, device_t perf_dev); 123 static int hwpstate_get_info_from_msr(device_t dev); 124 static int hwpstate_goto_pstate(device_t dev, int pstate_id); 125 126 static int hwpstate_verbose; 127 SYSCTL_INT(_debug, OID_AUTO, hwpstate_verbose, CTLFLAG_RWTUN, 128 &hwpstate_verbose, 0, "Debug hwpstate"); 129 130 static int hwpstate_verify; 131 SYSCTL_INT(_debug, OID_AUTO, hwpstate_verify, CTLFLAG_RWTUN, 132 &hwpstate_verify, 0, "Verify P-state after setting"); 133 134 static bool hwpstate_pstate_limit; 135 SYSCTL_BOOL(_debug, OID_AUTO, hwpstate_pstate_limit, CTLFLAG_RWTUN, 136 &hwpstate_pstate_limit, 0, 137 "If enabled (1), limit administrative control of P-states to the value in " 138 "CurPstateLimit"); 139 140 static device_method_t hwpstate_methods[] = { 141 /* Device interface */ 142 DEVMETHOD(device_identify, hwpstate_identify), 143 DEVMETHOD(device_probe, hwpstate_probe), 144 DEVMETHOD(device_attach, hwpstate_attach), 145 DEVMETHOD(device_detach, hwpstate_detach), 146 DEVMETHOD(device_shutdown, hwpstate_shutdown), 147 148 /* cpufreq interface */ 149 DEVMETHOD(cpufreq_drv_set, 
hwpstate_set), 150 DEVMETHOD(cpufreq_drv_get, hwpstate_get), 151 DEVMETHOD(cpufreq_drv_settings, hwpstate_settings), 152 DEVMETHOD(cpufreq_drv_type, hwpstate_type), 153 154 /* ACPI interface */ 155 DEVMETHOD(acpi_get_features, hwpstate_features), 156 {0, 0} 157 }; 158 159 static driver_t hwpstate_driver = { 160 "hwpstate", 161 hwpstate_methods, 162 sizeof(struct hwpstate_softc), 163 }; 164 165 DRIVER_MODULE(hwpstate, cpu, hwpstate_driver, 0, 0); 166 167 /* 168 * Go to Px-state on all cpus, considering the limit register (if so 169 * configured). 170 */ 171 static int 172 hwpstate_goto_pstate(device_t dev, int id) 173 { 174 sbintime_t sbt; 175 uint64_t msr; 176 int cpu, i, j, limit; 177 178 if (hwpstate_pstate_limit) { 179 /* get the current pstate limit */ 180 msr = rdmsr(MSR_AMD_10H_11H_LIMIT); 181 limit = AMD_10H_11H_GET_PSTATE_LIMIT(msr); 182 if (limit > id) { 183 HWPSTATE_DEBUG(dev, "Restricting requested P%d to P%d " 184 "due to HW limit\n", id, limit); 185 id = limit; 186 } 187 } 188 189 cpu = curcpu; 190 HWPSTATE_DEBUG(dev, "setting P%d-state on cpu%d\n", id, cpu); 191 /* Go To Px-state */ 192 wrmsr(MSR_AMD_10H_11H_CONTROL, id); 193 194 /* 195 * We are going to the same Px-state on all cpus. 196 * Probably should take _PSD into account. 197 */ 198 CPU_FOREACH(i) { 199 if (i == cpu) 200 continue; 201 202 /* Bind to each cpu. */ 203 thread_lock(curthread); 204 sched_bind(curthread, i); 205 thread_unlock(curthread); 206 HWPSTATE_DEBUG(dev, "setting P%d-state on cpu%d\n", id, i); 207 /* Go To Px-state */ 208 wrmsr(MSR_AMD_10H_11H_CONTROL, id); 209 } 210 211 /* 212 * Verify whether each core is in the requested P-state. 213 */ 214 if (hwpstate_verify) { 215 CPU_FOREACH(i) { 216 thread_lock(curthread); 217 sched_bind(curthread, i); 218 thread_unlock(curthread); 219 /* wait loop (100*100 usec is enough ?) */ 220 for (j = 0; j < 100; j++) { 221 /* get the result. 
not assure msr=id */ 222 msr = rdmsr(MSR_AMD_10H_11H_STATUS); 223 if (msr == id) 224 break; 225 sbt = SBT_1MS / 10; 226 tsleep_sbt(dev, PZERO, "pstate_goto", sbt, 227 sbt >> tc_precexp, 0); 228 } 229 HWPSTATE_DEBUG(dev, "result: P%d-state on cpu%d\n", 230 (int)msr, i); 231 if (msr != id) { 232 HWPSTATE_DEBUG(dev, 233 "error: loop is not enough.\n"); 234 return (ENXIO); 235 } 236 } 237 } 238 239 return (0); 240 } 241 242 static int 243 hwpstate_set(device_t dev, const struct cf_setting *cf) 244 { 245 struct hwpstate_softc *sc; 246 struct hwpstate_setting *set; 247 int i; 248 249 if (cf == NULL) 250 return (EINVAL); 251 sc = device_get_softc(dev); 252 set = sc->hwpstate_settings; 253 for (i = 0; i < sc->cfnum; i++) 254 if (CPUFREQ_CMP(cf->freq, set[i].freq)) 255 break; 256 if (i == sc->cfnum) 257 return (EINVAL); 258 259 return (hwpstate_goto_pstate(dev, set[i].pstate_id)); 260 } 261 262 static int 263 hwpstate_get(device_t dev, struct cf_setting *cf) 264 { 265 struct hwpstate_softc *sc; 266 struct hwpstate_setting set; 267 uint64_t msr; 268 269 sc = device_get_softc(dev); 270 if (cf == NULL) 271 return (EINVAL); 272 msr = rdmsr(MSR_AMD_10H_11H_STATUS); 273 if (msr >= sc->cfnum) 274 return (EINVAL); 275 set = sc->hwpstate_settings[msr]; 276 277 cf->freq = set.freq; 278 cf->volts = set.volts; 279 cf->power = set.power; 280 cf->lat = set.lat; 281 cf->dev = dev; 282 return (0); 283 } 284 285 static int 286 hwpstate_settings(device_t dev, struct cf_setting *sets, int *count) 287 { 288 struct hwpstate_softc *sc; 289 struct hwpstate_setting set; 290 int i; 291 292 if (sets == NULL || count == NULL) 293 return (EINVAL); 294 sc = device_get_softc(dev); 295 if (*count < sc->cfnum) 296 return (E2BIG); 297 for (i = 0; i < sc->cfnum; i++, sets++) { 298 set = sc->hwpstate_settings[i]; 299 sets->freq = set.freq; 300 sets->volts = set.volts; 301 sets->power = set.power; 302 sets->lat = set.lat; 303 sets->dev = dev; 304 } 305 *count = sc->cfnum; 306 307 return (0); 308 } 309 310 
/*
 * cpufreq_drv_type method: this driver provides absolute frequencies
 * (MHz), not relative throttling percentages.
 */
static int
hwpstate_type(device_t dev, int *type)
{

	if (type == NULL)
		return (EINVAL);

	*type = CPUFREQ_TYPE_ABSOLUTE;
	return (0);
}

/*
 * device_identify method: add a "hwpstate" child under each cpu device on
 * AMD family 10h+ or Hygon CPUs that advertise hardware P-state support.
 */
static void
hwpstate_identify(driver_t *driver, device_t parent)
{

	/* Already added? */
	if (device_find_child(parent, "hwpstate", -1) != NULL)
		return;

	/* AMD family 10h and up, or any Hygon CPU. */
	if ((cpu_vendor_id != CPU_VENDOR_AMD || CPUID_TO_FAMILY(cpu_id) < 0x10) &&
	    cpu_vendor_id != CPU_VENDOR_HYGON)
		return;

	/*
	 * Check if hardware pstate enable bit is set.
	 */
	if ((amd_pminfo & AMDPM_HW_PSTATE) == 0) {
		HWPSTATE_DEBUG(parent, "hwpstate enable bit is not set.\n");
		return;
	}

	/* Honor hint.hwpstate.0.disabled and similar. */
	if (resource_disabled("hwpstate", 0))
		return;

	if (BUS_ADD_CHILD(parent, 10, "hwpstate", device_get_unit(parent))
	    == NULL)
		device_printf(parent, "hwpstate: add child failed\n");
}

/*
 * device_probe method: gather the P-state table, preferring ACPI _PSS data
 * (via acpi_perf) when available and consistent, falling back to raw MSRs.
 * Fails (ENXIO) if acpi_perf will handle transitions itself.
 */
static int
hwpstate_probe(device_t dev)
{
	struct hwpstate_softc *sc;
	device_t perf_dev;
	uint64_t msr;
	int error, type;

	/*
	 * Only hwpstate0.
	 * It goes well with acpi_throttle.
	 */
	if (device_get_unit(dev) != 0)
		return (ENXIO);

	sc = device_get_softc(dev);
	sc->dev = dev;

	/*
	 * Check if acpi_perf has INFO only flag.
	 */
	perf_dev = device_find_child(device_get_parent(dev), "acpi_perf", -1);
	/* error doubles as a "need MSR fallback" flag below. */
	error = TRUE;
	if (perf_dev && device_is_attached(perf_dev)) {
		error = CPUFREQ_DRV_TYPE(perf_dev, &type);
		if (error == 0) {
			if ((type & CPUFREQ_FLAG_INFO_ONLY) == 0) {
				/*
				 * If acpi_perf doesn't have INFO_ONLY flag,
				 * it will take care of pstate transitions.
				 */
				HWPSTATE_DEBUG(dev, "acpi_perf will take care of pstate transitions.\n");
				return (ENXIO);
			} else {
				/*
				 * If acpi_perf has INFO_ONLY flag, (_PCT has FFixedHW)
				 * we can get _PSS info from acpi_perf
				 * without going into ACPI.
				 */
				HWPSTATE_DEBUG(dev, "going to fetch info from acpi_perf\n");
				error = hwpstate_get_info_from_acpi_perf(dev, perf_dev);
			}
		}
	}

	if (error == 0) {
		/*
		 * Now we get _PSS info from acpi_perf without error.
		 * Let's check it.
		 */
		msr = rdmsr(MSR_AMD_10H_11H_LIMIT);
		if (sc->cfnum != 1 + AMD_10H_11H_GET_PSTATE_MAX_VAL(msr)) {
			HWPSTATE_DEBUG(dev, "MSR (%jd) and ACPI _PSS (%d)"
			    " count mismatch\n", (intmax_t)msr, sc->cfnum);
			error = TRUE;
		}
	}

	/*
	 * If we cannot get info from acpi_perf,
	 * Let's get info from MSRs.
	 */
	if (error)
		error = hwpstate_get_info_from_msr(dev);
	if (error)
		return (error);

	device_set_desc(dev, "Cool`n'Quiet 2.0");
	return (0);
}

/*
 * device_attach method: register with the cpufreq framework.
 */
static int
hwpstate_attach(device_t dev)
{

	return (cpufreq_register(dev));
}

/*
 * Build the P-state table directly from the per-state CONFIG MSRs.
 * Frequency (and, for family 17h/18h, voltage and power) is decoded from
 * the FID/DID/VID fields; the encodings are family-specific (see the BKDG
 * references at the top of the file).  Returns ENXIO for unsupported
 * families or invalid (disabled) P-state entries.
 */
static int
hwpstate_get_info_from_msr(device_t dev)
{
	struct hwpstate_softc *sc;
	struct hwpstate_setting *hwpstate_set;
	uint64_t msr;
	int family, i, fid, did;

	family = CPUID_TO_FAMILY(cpu_id);
	sc = device_get_softc(dev);
	/* Get pstate count */
	msr = rdmsr(MSR_AMD_10H_11H_LIMIT);
	sc->cfnum = 1 + AMD_10H_11H_GET_PSTATE_MAX_VAL(msr);
	hwpstate_set = sc->hwpstate_settings;
	for (i = 0; i < sc->cfnum; i++) {
		msr = rdmsr(MSR_AMD_10H_11H_CONFIG + i);
		/* Bit 63 (PstateEn) must be set for a valid entry. */
		if ((msr & ((uint64_t)1 << 63)) == 0) {
			HWPSTATE_DEBUG(dev, "msr is not valid.\n");
			return (ENXIO);
		}
		did = AMD_10H_11H_CUR_DID(msr);
		fid = AMD_10H_11H_CUR_FID(msr);

		/* Defaults; family 17h/18h overrides volts/power below. */
		hwpstate_set[i].volts = CPUFREQ_VAL_UNKNOWN;
		hwpstate_set[i].power = CPUFREQ_VAL_UNKNOWN;
		hwpstate_set[i].lat = CPUFREQ_VAL_UNKNOWN;
		/* Convert fid/did to frequency. */
		switch (family) {
		case 0x11:
			hwpstate_set[i].freq = (100 * (fid + 0x08)) >> did;
			break;
		case 0x10:
		case 0x12:
		case 0x15:
		case 0x16:
			hwpstate_set[i].freq = (100 * (fid + 0x10)) >> did;
			break;
		case 0x17:
		case 0x18:
			/* Family 17h field layout differs; re-extract. */
			did = AMD_17H_CUR_DID(msr);
			if (did == 0) {
				HWPSTATE_DEBUG(dev, "unexpected did: 0\n");
				did = 1;
			}
			fid = AMD_17H_CUR_FID(msr);
			hwpstate_set[i].freq = (200 * fid) / did;
			/* Vid step is 6.25mV, so scale by 100. */
			hwpstate_set[i].volts =
			    (155000 - (625 * AMD_17H_CUR_VID(msr))) / 100;
			/*
			 * Calculate current first.
			 * This equation is mentioned in
			 * "BKDG for AMD Family 15h Models 70h-7fh Processors",
			 * section 2.5.2.1.6.
			 */
			hwpstate_set[i].power = AMD_17H_CUR_IDD(msr) * 1000;
			/*
			 * IDIV selects a power-of-ten divisor; each case
			 * intentionally falls through to accumulate /10.
			 */
			switch (AMD_17H_CUR_IDIV(msr)) {
			case 3: /* divide by 1000 */
				hwpstate_set[i].power /= 10;
				/* FALLTHROUGH */
			case 2: /* divide by 100 */
				hwpstate_set[i].power /= 10;
				/* FALLTHROUGH */
			case 1: /* divide by 10 */
				hwpstate_set[i].power /= 10;
				/* FALLTHROUGH */
			case 0: /* divide by 1 */
				;
			}
			hwpstate_set[i].power *= hwpstate_set[i].volts;
			/* Milli amps * milli volts to milli watts. */
			hwpstate_set[i].power /= 1000;
			break;
		default:
			HWPSTATE_DEBUG(dev, "get_info_from_msr: %s family"
			    " 0x%02x CPUs are not supported yet\n",
			    cpu_vendor_id == CPU_VENDOR_HYGON ? "Hygon" : "AMD",
			    family);
			return (ENXIO);
		}
		hwpstate_set[i].pstate_id = i;
	}
	return (0);
}

/*
 * Build the P-state table from acpi_perf's _PSS-derived settings.  Each
 * entry's spec[0] (the _PSS control value) must equal its index; a mismatch
 * means the ACPI table doesn't map 1:1 onto hardware P-state ids, and we
 * bail with ENXIO so the MSR fallback is used instead.
 */
static int
hwpstate_get_info_from_acpi_perf(device_t dev, device_t perf_dev)
{
	struct hwpstate_softc *sc;
	struct cf_setting *perf_set;
	struct hwpstate_setting *hwpstate_set;
	int count, error, i;

	perf_set = malloc(MAX_SETTINGS * sizeof(*perf_set), M_TEMP, M_NOWAIT);
	if (perf_set == NULL) {
		HWPSTATE_DEBUG(dev, "nomem\n");
		return (ENOMEM);
	}
	/*
	 * Fetch settings from acpi_perf.
	 * Now it is attached, and has info only flag.
	 */
	count = MAX_SETTINGS;
	error = CPUFREQ_DRV_SETTINGS(perf_dev, perf_set, &count);
	if (error) {
		HWPSTATE_DEBUG(dev, "error: CPUFREQ_DRV_SETTINGS.\n");
		goto out;
	}
	sc = device_get_softc(dev);
	sc->cfnum = count;
	hwpstate_set = sc->hwpstate_settings;
	for (i = 0; i < count; i++) {
		if (i == perf_set[i].spec[0]) {
			hwpstate_set[i].pstate_id = i;
			hwpstate_set[i].freq = perf_set[i].freq;
			hwpstate_set[i].volts = perf_set[i].volts;
			hwpstate_set[i].power = perf_set[i].power;
			hwpstate_set[i].lat = perf_set[i].lat;
		} else {
			HWPSTATE_DEBUG(dev, "ACPI _PSS object mismatch.\n");
			error = ENXIO;
			goto out;
		}
	}
out:
	if (perf_set)
		free(perf_set, M_TEMP);
	return (error);
}

/*
 * device_detach method: drop back to the fastest P-state (P0) before
 * unregistering from the cpufreq framework.
 */
static int
hwpstate_detach(device_t dev)
{

	hwpstate_goto_pstate(dev, 0);
	return (cpufreq_unregister(dev));
}

/*
 * device_shutdown method: intentionally a no-op (the P0 transition is
 * left commented out).
 */
static int
hwpstate_shutdown(device_t dev)
{

	/* hwpstate_goto_pstate(dev, 0); */
	return (0);
}

/*
 * acpi_get_features method: advertise direct MSR-based performance control
 * so the ACPI CPU driver passes the corresponding _OSC/_PDC capability.
 */
static int
hwpstate_features(driver_t *driver, u_int *features)
{

	/* Notify the ACPI CPU that we support direct access to MSRs */
	*features = ACPI_CAP_PERF_MSRS;
	return (0);
}