/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2005 Nate Lawson
 * Copyright (c) 2004 Colin Percival
 * Copyright (c) 2004-2005 Bruno Durcot
 * Copyright (c) 2004 FUKUDA Nobuhiko
 * Copyright (c) 2009 Michael Reifenberger
 * Copyright (c) 2009 Norikatsu Shigemura
 * Copyright (c) 2008-2009 Gen Otsuji
 *
 * This code depends in various parts on kern_cpu.c, est.c, powernow.c,
 * p4tcc.c, and smist.c.  The authors of these files are Nate Lawson,
 * Colin Percival, Bruno Durcot, and FUKUDA Nobuhiko.
 * This code contains patches by Michael Reifenberger and Norikatsu Shigemura.
 * Thank you.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted providing that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * For more info:
 * BIOS and Kernel Developer's Guide (BKDG) for AMD Family 10h Processors
 * 31116 Rev 3.20 February 04, 2009
 * BIOS and Kernel Developer's Guide (BKDG) for AMD Family 11h Processors
 * 41256 Rev 3.00 - July 07, 2008
 * Processor Programming Reference (PPR) for AMD Family 1Ah Model 02h,
 * Revision C1 Processors Volume 1 of 7 - Sep 29, 2024
 */

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/pcpu.h>
#include <sys/smp.h>
#include <sys/sched.h>

#include <machine/md_var.h>
#include <machine/cputypes.h>
#include <machine/specialreg.h>

#include <contrib/dev/acpica/include/acpi.h>

#include <dev/acpica/acpivar.h>

#include "acpi_if.h"
#include "cpufreq_if.h"

#define	MSR_AMD_10H_11H_LIMIT	0xc0010061
#define	MSR_AMD_10H_11H_CONTROL	0xc0010062
#define	MSR_AMD_10H_11H_STATUS	0xc0010063
#define	MSR_AMD_10H_11H_CONFIG	0xc0010064

#define	AMD_10H_11H_MAX_STATES	16

/* for MSR_AMD_10H_11H_LIMIT C001_0061 */
#define	AMD_10H_11H_GET_PSTATE_MAX_VAL(msr)	(((msr) >> 4) & 0x7)
#define	AMD_10H_11H_GET_PSTATE_LIMIT(msr)	((msr) & 0x7)
/* for MSR_AMD_10H_11H_CONFIG 10h:C001_0064:68 / 11h:C001_0064:6B */
#define	AMD_10H_11H_CUR_VID(msr)	(((msr) >> 9) & 0x7F)
#define	AMD_10H_11H_CUR_DID(msr)	(((msr) >> 6) & 0x07)
#define	AMD_10H_11H_CUR_FID(msr)	((msr) & 0x3F)

#define	AMD_17H_CUR_IDIV(msr)	(((msr) >> 30) & 0x03)
#define	AMD_17H_CUR_IDD(msr)	(((msr) >> 22) & 0xFF)
#define	AMD_17H_CUR_VID(msr)	(((msr) >> 14) & 0xFF)
#define	AMD_17H_CUR_DID(msr)	(((msr) >> 8) & 0x3F)
#define	AMD_17H_CUR_FID(msr)	((msr) & 0xFF)

#define	AMD_1AH_CUR_FID(msr)	((msr) & 0xFFF)
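
/*
 * Core frequency formulas implemented by hwpstate_get_info_from_msr() below,
 * using the FID/DID fields extracted above (values in MHz):
 *   10h/12h/15h/16h: 100 * (CpuFid + 0x10) >> CpuDid
 *   11h:             100 * (CpuFid + 0x08) >> CpuDid
 *   17h-19h:         200 * CpuFid / CpuDfsId
 *   1Ah:             CpuFid * 5 (no divisor)
 */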

#define	HWPSTATE_DEBUG(dev, msg...)			\
	do {						\
		if (hwpstate_verbose)			\
			device_printf(dev, msg);	\
	} while (0)
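
/*
 * All diagnostic output below is gated by the debug.hwpstate_verbose
 * sysctl/tunable defined further down.
 */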

struct hwpstate_setting {
	int	freq;		/* CPU clock in MHz or 100ths of a percent. */
	int	volts;		/* Voltage in mV. */
	int	power;		/* Power consumed in mW. */
	int	lat;		/* Transition latency in us. */
	int	pstate_id;	/* P-State id */
};

struct hwpstate_softc {
	device_t		dev;
	struct hwpstate_setting	hwpstate_settings[AMD_10H_11H_MAX_STATES];
	int			cfnum;
};

static void	hwpstate_identify(driver_t *driver, device_t parent);
static int	hwpstate_probe(device_t dev);
static int	hwpstate_attach(device_t dev);
static int	hwpstate_detach(device_t dev);
static int	hwpstate_set(device_t dev, const struct cf_setting *cf);
static int	hwpstate_get(device_t dev, struct cf_setting *cf);
static int	hwpstate_settings(device_t dev, struct cf_setting *sets, int *count);
static int	hwpstate_type(device_t dev, int *type);
static int	hwpstate_shutdown(device_t dev);
static int	hwpstate_features(driver_t *driver, u_int *features);
static int	hwpstate_get_info_from_acpi_perf(device_t dev, device_t perf_dev);
static int	hwpstate_get_info_from_msr(device_t dev);
static int	hwpstate_goto_pstate(device_t dev, int pstate_id);

static int	hwpstate_verbose;
SYSCTL_INT(_debug, OID_AUTO, hwpstate_verbose, CTLFLAG_RWTUN,
    &hwpstate_verbose, 0, "Debug hwpstate");

static int	hwpstate_verify;
SYSCTL_INT(_debug, OID_AUTO, hwpstate_verify, CTLFLAG_RWTUN,
    &hwpstate_verify, 0, "Verify P-state after setting");

static bool	hwpstate_pstate_limit;
SYSCTL_BOOL(_debug, OID_AUTO, hwpstate_pstate_limit, CTLFLAG_RWTUN,
    &hwpstate_pstate_limit, 0,
    "If enabled (1), limit administrative control of P-states to the value in "
    "CurPstateLimit");

static device_method_t hwpstate_methods[] = {
	/* Device interface */
	DEVMETHOD(device_identify,	hwpstate_identify),
	DEVMETHOD(device_probe,		hwpstate_probe),
	DEVMETHOD(device_attach,	hwpstate_attach),
	DEVMETHOD(device_detach,	hwpstate_detach),
	DEVMETHOD(device_shutdown,	hwpstate_shutdown),

	/* cpufreq interface */
	DEVMETHOD(cpufreq_drv_set,	hwpstate_set),
	DEVMETHOD(cpufreq_drv_get,	hwpstate_get),
	DEVMETHOD(cpufreq_drv_settings,	hwpstate_settings),
	DEVMETHOD(cpufreq_drv_type,	hwpstate_type),

	/* ACPI interface */
	DEVMETHOD(acpi_get_features,	hwpstate_features),
	{0, 0}
};

static driver_t hwpstate_driver = {
	"hwpstate",
	hwpstate_methods,
	sizeof(struct hwpstate_softc),
};

DRIVER_MODULE(hwpstate, cpu, hwpstate_driver, 0, 0);

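
/*
 * Scale an IDD current value by the power-of-ten divisor encoded in the
 * IddDiv field (0 = /1, 1 = /10, 2 = /100, 3 = /1000).  The switch below
 * falls through deliberately so the divisions accumulate.
 */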
static int
hwpstate_amd_iscale(int val, int div)
{
	switch (div) {
	case 3: /* divide by 1000 */
		val /= 10;
		/* FALLTHROUGH */
	case 2: /* divide by 100 */
		val /= 10;
		/* FALLTHROUGH */
	case 1: /* divide by 10 */
		val /= 10;
		/* FALLTHROUGH */
	case 0: /* divide by 1 */
		;
	}

	return (val);
}

/*
 * Go to Px-state on all cpus, considering the limit register (if so
 * configured).
 */
static int
hwpstate_goto_pstate(device_t dev, int id)
{
	sbintime_t sbt;
	uint64_t msr;
	int cpu, i, j, limit;

	if (hwpstate_pstate_limit) {
		/* get the current pstate limit */
		msr = rdmsr(MSR_AMD_10H_11H_LIMIT);
		limit = AMD_10H_11H_GET_PSTATE_LIMIT(msr);
		if (limit > id) {
			HWPSTATE_DEBUG(dev, "Restricting requested P%d to P%d "
			    "due to HW limit\n", id, limit);
			id = limit;
		}
	}

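	/*
	 * Writing a P-state index to the control MSR requests the transition
	 * on the CPU executing the write, so the write is repeated on every
	 * CPU below.
	 */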
	cpu = curcpu;
	HWPSTATE_DEBUG(dev, "setting P%d-state on cpu%d\n", id, cpu);
	/* Go To Px-state */
	wrmsr(MSR_AMD_10H_11H_CONTROL, id);

	/*
	 * We are going to the same Px-state on all cpus.
	 * Probably should take _PSD into account.
	 */
	CPU_FOREACH(i) {
		if (i == cpu)
			continue;

		/* Bind to each cpu. */
		thread_lock(curthread);
		sched_bind(curthread, i);
		thread_unlock(curthread);
		HWPSTATE_DEBUG(dev, "setting P%d-state on cpu%d\n", id, i);
		/* Go To Px-state */
		wrmsr(MSR_AMD_10H_11H_CONTROL, id);
	}

	/*
	 * Verify whether each core is in the requested P-state.
	 */
	if (hwpstate_verify) {
		CPU_FOREACH(i) {
			thread_lock(curthread);
			sched_bind(curthread, i);
			thread_unlock(curthread);
			/* Wait up to 100 * 100us for the transition. */
			for (j = 0; j < 100; j++) {
				/* Read back the P-state; may not be id yet. */
				msr = rdmsr(MSR_AMD_10H_11H_STATUS);
				if (msr == id)
					break;
				sbt = SBT_1MS / 10;
				tsleep_sbt(dev, PZERO, "pstate_goto", sbt,
				    sbt >> tc_precexp, 0);
			}
			HWPSTATE_DEBUG(dev, "result: P%d-state on cpu%d\n",
			    (int)msr, i);
			if (msr != id) {
				HWPSTATE_DEBUG(dev,
				    "error: P-state transition timed out.\n");
				return (ENXIO);
			}
		}
	}

	return (0);
}

static int
hwpstate_set(device_t dev, const struct cf_setting *cf)
{
	struct hwpstate_softc *sc;
	struct hwpstate_setting *set;
	int i;

	if (cf == NULL)
		return (EINVAL);
	sc = device_get_softc(dev);
	set = sc->hwpstate_settings;
	for (i = 0; i < sc->cfnum; i++)
		if (CPUFREQ_CMP(cf->freq, set[i].freq))
			break;
	if (i == sc->cfnum)
		return (EINVAL);

	return (hwpstate_goto_pstate(dev, set[i].pstate_id));
}

static int
hwpstate_get(device_t dev, struct cf_setting *cf)
{
	struct hwpstate_softc *sc;
	struct hwpstate_setting set;
	uint64_t msr;

	sc = device_get_softc(dev);
	if (cf == NULL)
		return (EINVAL);
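	/* The status MSR holds the index of the currently active P-state. */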
	msr = rdmsr(MSR_AMD_10H_11H_STATUS);
	if (msr >= sc->cfnum)
		return (EINVAL);
	set = sc->hwpstate_settings[msr];

	cf->freq = set.freq;
	cf->volts = set.volts;
	cf->power = set.power;
	cf->lat = set.lat;
	cf->dev = dev;
	return (0);
}

static int
hwpstate_settings(device_t dev, struct cf_setting *sets, int *count)
{
	struct hwpstate_softc *sc;
	struct hwpstate_setting set;
	int i;

	if (sets == NULL || count == NULL)
		return (EINVAL);
	sc = device_get_softc(dev);
	if (*count < sc->cfnum)
		return (E2BIG);
	for (i = 0; i < sc->cfnum; i++, sets++) {
		set = sc->hwpstate_settings[i];
		sets->freq = set.freq;
		sets->volts = set.volts;
		sets->power = set.power;
		sets->lat = set.lat;
		sets->dev = dev;
	}
	*count = sc->cfnum;

	return (0);
}

static int
hwpstate_type(device_t dev, int *type)
{

	if (type == NULL)
		return (EINVAL);

	*type = CPUFREQ_TYPE_ABSOLUTE;
	return (0);
}

static void
hwpstate_identify(driver_t *driver, device_t parent)
{

	if (device_find_child(parent, "hwpstate", DEVICE_UNIT_ANY) != NULL)
		return;

	if ((cpu_vendor_id != CPU_VENDOR_AMD || CPUID_TO_FAMILY(cpu_id) < 0x10) &&
	    cpu_vendor_id != CPU_VENDOR_HYGON)
		return;

	/*
	 * Check if hardware pstate enable bit is set.
	 */
	if ((amd_pminfo & AMDPM_HW_PSTATE) == 0) {
		HWPSTATE_DEBUG(parent, "hwpstate enable bit is not set.\n");
		return;
	}

	if (resource_disabled("hwpstate", 0))
		return;

	if (BUS_ADD_CHILD(parent, 10, "hwpstate", device_get_unit(parent))
	    == NULL)
		device_printf(parent, "hwpstate: add child failed\n");
}

static int
hwpstate_probe(device_t dev)
{
	struct hwpstate_softc *sc;
	device_t perf_dev;
	uint64_t msr;
	int error, type;

	/*
	 * Attach only to hwpstate0; this coexists with acpi_throttle.
	 */
	if (device_get_unit(dev) != 0)
		return (ENXIO);

	sc = device_get_softc(dev);
	sc->dev = dev;

	/*
	 * Check if acpi_perf has the INFO_ONLY flag.
	 */
	perf_dev = device_find_child(device_get_parent(dev), "acpi_perf",
	    DEVICE_UNIT_ANY);
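	/* Assume failure until valid info is fetched from acpi_perf. */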
	error = TRUE;
	if (perf_dev && device_is_attached(perf_dev)) {
		error = CPUFREQ_DRV_TYPE(perf_dev, &type);
		if (error == 0) {
			if ((type & CPUFREQ_FLAG_INFO_ONLY) == 0) {
				/*
				 * If acpi_perf doesn't have INFO_ONLY flag,
				 * it will take care of pstate transitions.
				 */
				HWPSTATE_DEBUG(dev, "acpi_perf will take care of pstate transitions.\n");
				return (ENXIO);
			} else {
				/*
				 * If acpi_perf has INFO_ONLY flag (_PCT has FFixedHW),
				 * we can get _PSS info from acpi_perf
				 * without going into ACPI.
				 */
				HWPSTATE_DEBUG(dev, "going to fetch info from acpi_perf\n");
				error = hwpstate_get_info_from_acpi_perf(dev, perf_dev);
			}
		}
	}

	if (error == 0) {
		/*
		 * We got _PSS info from acpi_perf without error;
		 * sanity-check it against the MSR P-state count.
		 */
		msr = rdmsr(MSR_AMD_10H_11H_LIMIT);
		if (sc->cfnum != 1 + AMD_10H_11H_GET_PSTATE_MAX_VAL(msr)) {
			HWPSTATE_DEBUG(dev, "MSR (%jd) and ACPI _PSS (%d)"
			    " count mismatch\n", (intmax_t)msr, sc->cfnum);
			error = TRUE;
		}
	}

	/*
	 * If we could not get info from acpi_perf,
	 * fall back to reading the MSRs.
	 */
	if (error)
		error = hwpstate_get_info_from_msr(dev);
	if (error)
		return (error);

	device_set_desc(dev, "Cool`n'Quiet 2.0");
	return (0);
}

static int
hwpstate_attach(device_t dev)
{

	return (cpufreq_register(dev));
}

static int
hwpstate_get_info_from_msr(device_t dev)
{
	struct hwpstate_softc *sc;
	struct hwpstate_setting *hwpstate_set;
	uint64_t msr;
	int family, i, fid, did;

	family = CPUID_TO_FAMILY(cpu_id);
	sc = device_get_softc(dev);
	/* Get pstate count */
	msr = rdmsr(MSR_AMD_10H_11H_LIMIT);
	sc->cfnum = 1 + AMD_10H_11H_GET_PSTATE_MAX_VAL(msr);
	hwpstate_set = sc->hwpstate_settings;
	for (i = 0; i < sc->cfnum; i++) {
		msr = rdmsr(MSR_AMD_10H_11H_CONFIG + i);
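		/* Bit 63 is the P-state enable/valid bit. */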
		if ((msr & ((uint64_t)1 << 63)) == 0) {
			HWPSTATE_DEBUG(dev, "msr is not valid.\n");
			return (ENXIO);
		}
		did = AMD_10H_11H_CUR_DID(msr);
		fid = AMD_10H_11H_CUR_FID(msr);

		hwpstate_set[i].volts = CPUFREQ_VAL_UNKNOWN;
		hwpstate_set[i].power = CPUFREQ_VAL_UNKNOWN;
		hwpstate_set[i].lat = CPUFREQ_VAL_UNKNOWN;
		/* Convert fid/did to frequency. */
		switch (family) {
		case 0x11:
			hwpstate_set[i].freq = (100 * (fid + 0x08)) >> did;
			break;
		case 0x10:
		case 0x12:
		case 0x15:
		case 0x16:
			hwpstate_set[i].freq = (100 * (fid + 0x10)) >> did;
			break;
		case 0x17:
		case 0x18:
		case 0x19:
		case 0x1A:
			/* calculate freq */
			if (family == 0x1A) {
				fid = AMD_1AH_CUR_FID(msr);
				/* Family 1Ah CPUs don't use a divisor. */
				hwpstate_set[i].freq = fid;
				if (fid > 0x0f) {
					hwpstate_set[i].freq *= 5;
				} else {
					HWPSTATE_DEBUG(dev,
					    "unexpected fid: %d\n", fid);
					return (ENXIO);
				}
			} else {
				did = AMD_17H_CUR_DID(msr);
				if (did == 0) {
					HWPSTATE_DEBUG(dev,
					    "unexpected did: 0\n");
					did = 1;
				}
				fid = AMD_17H_CUR_FID(msr);
				hwpstate_set[i].freq = (200 * fid) / did;
			}

			/* Vid step is 6.25mV, so scale by 100. */
			hwpstate_set[i].volts =
			    (155000 - (625 * AMD_17H_CUR_VID(msr))) / 100;
			/*
			 * Calculate current first.
			 * This equation is mentioned in
			 * "BKDG for AMD Family 15h Models 70h-7fh Processors",
			 * section 2.5.2.1.6.
			 */
			hwpstate_set[i].power = AMD_17H_CUR_IDD(msr) * 1000;
			hwpstate_set[i].power = hwpstate_amd_iscale(
			    hwpstate_set[i].power, AMD_17H_CUR_IDIV(msr));
			hwpstate_set[i].power *= hwpstate_set[i].volts;
			/* Milli amps * milli volts to milli watts. */
			hwpstate_set[i].power /= 1000;
			break;
		default:
			HWPSTATE_DEBUG(dev, "get_info_from_msr: %s family"
			    " 0x%02x CPUs are not supported yet\n",
			    cpu_vendor_id == CPU_VENDOR_HYGON ? "Hygon" : "AMD",
			    family);
			return (ENXIO);
		}
		hwpstate_set[i].pstate_id = i;
	}
	return (0);
}

static int
hwpstate_get_info_from_acpi_perf(device_t dev, device_t perf_dev)
{
	struct hwpstate_softc *sc;
	struct cf_setting *perf_set;
	struct hwpstate_setting *hwpstate_set;
	int count, error, i;

	perf_set = malloc(MAX_SETTINGS * sizeof(*perf_set), M_TEMP, M_NOWAIT);
	if (perf_set == NULL) {
		HWPSTATE_DEBUG(dev, "nomem\n");
		return (ENOMEM);
	}
	/*
	 * Fetch settings from acpi_perf.
	 * It is attached at this point and carries the INFO_ONLY flag.
	 */
	count = MAX_SETTINGS;
	error = CPUFREQ_DRV_SETTINGS(perf_dev, perf_set, &count);
	if (error) {
		HWPSTATE_DEBUG(dev, "error: CPUFREQ_DRV_SETTINGS.\n");
		goto out;
	}
	sc = device_get_softc(dev);
	sc->cfnum = count;
	hwpstate_set = sc->hwpstate_settings;
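	/* The P-state id reported by acpi_perf (spec[0]) must match our index. */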
	for (i = 0; i < count; i++) {
		if (i == perf_set[i].spec[0]) {
			hwpstate_set[i].pstate_id = i;
			hwpstate_set[i].freq = perf_set[i].freq;
			hwpstate_set[i].volts = perf_set[i].volts;
			hwpstate_set[i].power = perf_set[i].power;
			hwpstate_set[i].lat = perf_set[i].lat;
		} else {
			HWPSTATE_DEBUG(dev, "ACPI _PSS object mismatch.\n");
			error = ENXIO;
			goto out;
		}
	}
out:
	if (perf_set)
		free(perf_set, M_TEMP);
	return (error);
}

static int
hwpstate_detach(device_t dev)
{

	hwpstate_goto_pstate(dev, 0);
	return (cpufreq_unregister(dev));
}

static int
hwpstate_shutdown(device_t dev)
{

	/* hwpstate_goto_pstate(dev, 0); */
	return (0);
}

static int
hwpstate_features(driver_t *driver, u_int *features)
{

	/* Notify the ACPI CPU that we support direct access to MSRs */
	*features = ACPI_CAP_PERF_MSRS;
	return (0);
}