xref: /freebsd/sys/x86/cpufreq/hwpstate_amd.c (revision 3332f1b444d4a73238e9f59cca27bfc95fe936bd)
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2005 Nate Lawson
 * Copyright (c) 2004 Colin Percival
 * Copyright (c) 2004-2005 Bruno Durcot
 * Copyright (c) 2004 FUKUDA Nobuhiko
 * Copyright (c) 2009 Michael Reifenberger
 * Copyright (c) 2009 Norikatsu Shigemura
 * Copyright (c) 2008-2009 Gen Otsuji
 *
 * This code is depending on kern_cpu.c, est.c, powernow.c, p4tcc.c, smist.c
 * in various parts. The authors of these files are Nate Lawson,
 * Colin Percival, Bruno Durcot, and FUKUDA Nobuhiko.
 * This code contains patches by Michael Reifenberger and Norikatsu Shigemura.
 * Thank you.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted providing that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * For more info:
 * BIOS and Kernel Developer's Guide(BKDG) for AMD Family 10h Processors
 * 31116 Rev 3.20  February 04, 2009
 * BIOS and Kernel Developer's Guide(BKDG) for AMD Family 11h Processors
 * 41256 Rev 3.00 - July 07, 2008
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/pcpu.h>
#include <sys/smp.h>
#include <sys/sched.h>

#include <machine/md_var.h>
#include <machine/cputypes.h>
#include <machine/specialreg.h>

#include <contrib/dev/acpica/include/acpi.h>

#include <dev/acpica/acpivar.h>

#include "acpi_if.h"
#include "cpufreq_if.h"

#define	MSR_AMD_10H_11H_LIMIT	0xc0010061
#define	MSR_AMD_10H_11H_CONTROL	0xc0010062
#define	MSR_AMD_10H_11H_STATUS	0xc0010063
#define	MSR_AMD_10H_11H_CONFIG	0xc0010064

#define	AMD_10H_11H_MAX_STATES	16

/* for MSR_AMD_10H_11H_LIMIT C001_0061 */
#define	AMD_10H_11H_GET_PSTATE_MAX_VAL(msr)	(((msr) >> 4) & 0x7)
#define	AMD_10H_11H_GET_PSTATE_LIMIT(msr)	(((msr)) & 0x7)
/* for MSR_AMD_10H_11H_CONFIG 10h:C001_0064:68 / 11h:C001_0064:6B */
#define	AMD_10H_11H_CUR_VID(msr)		(((msr) >> 9) & 0x7F)
#define	AMD_10H_11H_CUR_DID(msr)		(((msr) >> 6) & 0x07)
#define	AMD_10H_11H_CUR_FID(msr)		((msr) & 0x3F)

#define	AMD_17H_CUR_VID(msr)			(((msr) >> 14) & 0xFF)
#define	AMD_17H_CUR_DID(msr)			(((msr) >> 8) & 0x3F)
#define	AMD_17H_CUR_FID(msr)			((msr) & 0xFF)
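/*
 * Note on field widths: the masks above assume the 6-bit CpuFid / 3-bit
 * CpuDid / 7-bit CpuVid layout of the family 10h-16h P-state registers,
 * while the AMD_17H_* variants decode the wider 8-bit FID / 6-bit DID /
 * 8-bit VID fields used from family 17h on.
 */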

#define	HWPSTATE_DEBUG(dev, msg...)			\
	do {						\
		if (hwpstate_verbose)			\
			device_printf(dev, msg);	\
	} while (0)

struct hwpstate_setting {
	int	freq;		/* CPU clock in MHz or 100ths of a percent. */
	int	volts;		/* Voltage in mV. */
	int	power;		/* Power consumed in mW. */
	int	lat;		/* Transition latency in us. */
	int	pstate_id;	/* P-State id */
};

struct hwpstate_softc {
	device_t		dev;
	struct hwpstate_setting	hwpstate_settings[AMD_10H_11H_MAX_STATES];
	int			cfnum;
};

static void	hwpstate_identify(driver_t *driver, device_t parent);
static int	hwpstate_probe(device_t dev);
static int	hwpstate_attach(device_t dev);
static int	hwpstate_detach(device_t dev);
static int	hwpstate_set(device_t dev, const struct cf_setting *cf);
static int	hwpstate_get(device_t dev, struct cf_setting *cf);
static int	hwpstate_settings(device_t dev, struct cf_setting *sets, int *count);
static int	hwpstate_type(device_t dev, int *type);
static int	hwpstate_shutdown(device_t dev);
static int	hwpstate_features(driver_t *driver, u_int *features);
static int	hwpstate_get_info_from_acpi_perf(device_t dev, device_t perf_dev);
static int	hwpstate_get_info_from_msr(device_t dev);
static int	hwpstate_goto_pstate(device_t dev, int pstate_id);

static int	hwpstate_verbose;
SYSCTL_INT(_debug, OID_AUTO, hwpstate_verbose, CTLFLAG_RWTUN,
    &hwpstate_verbose, 0, "Debug hwpstate");

static int	hwpstate_verify;
SYSCTL_INT(_debug, OID_AUTO, hwpstate_verify, CTLFLAG_RWTUN,
    &hwpstate_verify, 0, "Verify P-state after setting");

static bool	hwpstate_pstate_limit;
SYSCTL_BOOL(_debug, OID_AUTO, hwpstate_pstate_limit, CTLFLAG_RWTUN,
    &hwpstate_pstate_limit, 0,
    "If enabled (1), limit administrative control of P-states to the value in "
    "CurPstateLimit");
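
/*
 * These knobs are CTLFLAG_RWTUN, so they can be flipped at runtime, e.g.
 *
 *	sysctl debug.hwpstate_verbose=1
 *
 * or set as loader tunables (debug.hwpstate_verbose="1" in loader.conf).
 */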

static device_method_t hwpstate_methods[] = {
	/* Device interface */
	DEVMETHOD(device_identify,	hwpstate_identify),
	DEVMETHOD(device_probe,		hwpstate_probe),
	DEVMETHOD(device_attach,	hwpstate_attach),
	DEVMETHOD(device_detach,	hwpstate_detach),
	DEVMETHOD(device_shutdown,	hwpstate_shutdown),

	/* cpufreq interface */
	DEVMETHOD(cpufreq_drv_set,	hwpstate_set),
	DEVMETHOD(cpufreq_drv_get,	hwpstate_get),
	DEVMETHOD(cpufreq_drv_settings,	hwpstate_settings),
	DEVMETHOD(cpufreq_drv_type,	hwpstate_type),

	/* ACPI interface */
	DEVMETHOD(acpi_get_features,	hwpstate_features),
	{0, 0}
};

static devclass_t hwpstate_devclass;
static driver_t hwpstate_driver = {
	"hwpstate",
	hwpstate_methods,
	sizeof(struct hwpstate_softc),
};

DRIVER_MODULE(hwpstate, cpu, hwpstate_driver, hwpstate_devclass, 0, 0);

/*
 * Go to Px-state on all cpus, considering the limit register (if so
 * configured).
 */
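/*
 * AMD P-states are numbered with P0 as the fastest state and larger ids
 * as slower states, so clamping a requested id up to CurPstateLimit below
 * restricts the transition to a lower-performance P-state.
 */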
static int
hwpstate_goto_pstate(device_t dev, int id)
{
	sbintime_t sbt;
	uint64_t msr;
	int cpu, i, j, limit;

	if (hwpstate_pstate_limit) {
		/* Get the current P-state limit. */
		msr = rdmsr(MSR_AMD_10H_11H_LIMIT);
		limit = AMD_10H_11H_GET_PSTATE_LIMIT(msr);
		if (limit > id) {
			HWPSTATE_DEBUG(dev, "Restricting requested P%d to P%d "
			    "due to HW limit\n", id, limit);
			id = limit;
		}
	}

	cpu = curcpu;
	HWPSTATE_DEBUG(dev, "setting P%d-state on cpu%d\n", id, cpu);
	/* Go To Px-state */
	wrmsr(MSR_AMD_10H_11H_CONTROL, id);

	/*
	 * We are going to the same Px-state on all cpus.
	 * Probably should take _PSD into account.
	 */
	CPU_FOREACH(i) {
		if (i == cpu)
			continue;

		/* Bind to each cpu. */
		thread_lock(curthread);
		sched_bind(curthread, i);
		thread_unlock(curthread);
		HWPSTATE_DEBUG(dev, "setting P%d-state on cpu%d\n", id, i);
		/* Go To Px-state */
		wrmsr(MSR_AMD_10H_11H_CONTROL, id);
	}

	/*
	 * Verify whether each core is in the requested P-state.
	 */
	if (hwpstate_verify) {
		CPU_FOREACH(i) {
			thread_lock(curthread);
			sched_bind(curthread, i);
			thread_unlock(curthread);
			/* Wait loop: is 100 iterations of 100us enough? */
			for (j = 0; j < 100; j++) {
				/* Read the current P-state; it may not be id yet. */
				msr = rdmsr(MSR_AMD_10H_11H_STATUS);
				if (msr == id)
					break;
				sbt = SBT_1MS / 10;
				tsleep_sbt(dev, PZERO, "pstate_goto", sbt,
				    sbt >> tc_precexp, 0);
			}
			HWPSTATE_DEBUG(dev, "result: P%d-state on cpu%d\n",
			    (int)msr, i);
			if (msr != id) {
				HWPSTATE_DEBUG(dev,
				    "error: P-state transition did not complete.\n");
				return (ENXIO);
			}
		}
	}

	return (0);
}

static int
hwpstate_set(device_t dev, const struct cf_setting *cf)
{
	struct hwpstate_softc *sc;
	struct hwpstate_setting *set;
	int i;

	if (cf == NULL)
		return (EINVAL);
	sc = device_get_softc(dev);
	set = sc->hwpstate_settings;
	for (i = 0; i < sc->cfnum; i++)
		if (CPUFREQ_CMP(cf->freq, set[i].freq))
			break;
	if (i == sc->cfnum)
		return (EINVAL);

	return (hwpstate_goto_pstate(dev, set[i].pstate_id));
}

static int
hwpstate_get(device_t dev, struct cf_setting *cf)
{
	struct hwpstate_softc *sc;
	struct hwpstate_setting set;
	uint64_t msr;

	sc = device_get_softc(dev);
	if (cf == NULL)
		return (EINVAL);
	msr = rdmsr(MSR_AMD_10H_11H_STATUS);
	if (msr >= sc->cfnum)
		return (EINVAL);
	set = sc->hwpstate_settings[msr];

	cf->freq = set.freq;
	cf->volts = set.volts;
	cf->power = set.power;
	cf->lat = set.lat;
	cf->dev = dev;
	return (0);
}

static int
hwpstate_settings(device_t dev, struct cf_setting *sets, int *count)
{
	struct hwpstate_softc *sc;
	struct hwpstate_setting set;
	int i;

	if (sets == NULL || count == NULL)
		return (EINVAL);
	sc = device_get_softc(dev);
	if (*count < sc->cfnum)
		return (E2BIG);
	for (i = 0; i < sc->cfnum; i++, sets++) {
		set = sc->hwpstate_settings[i];
		sets->freq = set.freq;
		sets->volts = set.volts;
		sets->power = set.power;
		sets->lat = set.lat;
		sets->dev = dev;
	}
	*count = sc->cfnum;

	return (0);
}

static int
hwpstate_type(device_t dev, int *type)
{

	if (type == NULL)
		return (EINVAL);

	*type = CPUFREQ_TYPE_ABSOLUTE;
	return (0);
}

static void
hwpstate_identify(driver_t *driver, device_t parent)
{

	if (device_find_child(parent, "hwpstate", -1) != NULL)
		return;

	if ((cpu_vendor_id != CPU_VENDOR_AMD || CPUID_TO_FAMILY(cpu_id) < 0x10) &&
	    cpu_vendor_id != CPU_VENDOR_HYGON)
		return;

	/*
	 * Check if hardware pstate enable bit is set.
	 */
	if ((amd_pminfo & AMDPM_HW_PSTATE) == 0) {
		HWPSTATE_DEBUG(parent, "hwpstate enable bit is not set.\n");
		return;
	}

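	/*
	 * The device can also be disabled administratively, e.g. with
	 * hint.hwpstate.0.disabled="1" in device.hints or loader.conf.
	 */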
	if (resource_disabled("hwpstate", 0))
		return;

	if (BUS_ADD_CHILD(parent, 10, "hwpstate", device_get_unit(parent))
	    == NULL)
		device_printf(parent, "hwpstate: add child failed\n");
}

static int
hwpstate_probe(device_t dev)
{
	struct hwpstate_softc *sc;
	device_t perf_dev;
	uint64_t msr;
	int error, type;

	/*
	 * Attach only to hwpstate0; it coexists well with acpi_throttle.
	 */
	if (device_get_unit(dev) != 0)
		return (ENXIO);

	sc = device_get_softc(dev);
	sc->dev = dev;

	/*
	 * Check if acpi_perf has the INFO_ONLY flag.
	 */
	perf_dev = device_find_child(device_get_parent(dev), "acpi_perf", -1);
	error = TRUE;
	if (perf_dev && device_is_attached(perf_dev)) {
		error = CPUFREQ_DRV_TYPE(perf_dev, &type);
		if (error == 0) {
			if ((type & CPUFREQ_FLAG_INFO_ONLY) == 0) {
				/*
				 * If acpi_perf doesn't have the INFO_ONLY flag,
				 * it will take care of P-state transitions.
				 */
				HWPSTATE_DEBUG(dev, "acpi_perf will take care of pstate transitions.\n");
				return (ENXIO);
			} else {
				/*
				 * If acpi_perf has the INFO_ONLY flag (_PCT has
				 * FFixedHW), we can get the _PSS info from
				 * acpi_perf without going into ACPI.
				 */
				HWPSTATE_DEBUG(dev, "going to fetch info from acpi_perf\n");
				error = hwpstate_get_info_from_acpi_perf(dev, perf_dev);
			}
		}
	}

	if (error == 0) {
		/*
		 * We got the _PSS info from acpi_perf without error.
		 * Sanity-check it against the limit register.
		 */
		msr = rdmsr(MSR_AMD_10H_11H_LIMIT);
		if (sc->cfnum != 1 + AMD_10H_11H_GET_PSTATE_MAX_VAL(msr)) {
			HWPSTATE_DEBUG(dev, "MSR (%jd) and ACPI _PSS (%d)"
			    " count mismatch\n", (intmax_t)msr, sc->cfnum);
			error = TRUE;
		}
	}

	/*
	 * If we could not get the info from acpi_perf,
	 * get it from the MSRs instead.
	 */
	if (error)
		error = hwpstate_get_info_from_msr(dev);
	if (error)
		return (error);

	device_set_desc(dev, "Cool`n'Quiet 2.0");
	return (0);
}

static int
hwpstate_attach(device_t dev)
{

	return (cpufreq_register(dev));
}

static int
hwpstate_get_info_from_msr(device_t dev)
{
	struct hwpstate_softc *sc;
	struct hwpstate_setting *hwpstate_set;
	uint64_t msr;
	int family, i, fid, did;

	family = CPUID_TO_FAMILY(cpu_id);
	sc = device_get_softc(dev);
	/* Get the P-state count. */
	msr = rdmsr(MSR_AMD_10H_11H_LIMIT);
	sc->cfnum = 1 + AMD_10H_11H_GET_PSTATE_MAX_VAL(msr);
	hwpstate_set = sc->hwpstate_settings;
	for (i = 0; i < sc->cfnum; i++) {
		msr = rdmsr(MSR_AMD_10H_11H_CONFIG + i);
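		/*
		 * Bit 63 of each P-state register is its enable/valid bit
		 * (PstateEn in the BKDG); an entry below the advertised
		 * limit that is not enabled is treated as an error here.
		 */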
		if ((msr & ((uint64_t)1 << 63)) == 0) {
			HWPSTATE_DEBUG(dev, "msr is not valid.\n");
			return (ENXIO);
		}
		did = AMD_10H_11H_CUR_DID(msr);
		fid = AMD_10H_11H_CUR_FID(msr);

		/* Convert fid/did to frequency. */
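		/*
		 * Core frequency as computed below: family 11h uses
		 * 100 MHz * (fid + 0x08) >> did, families 10h/12h/15h/16h use
		 * 100 MHz * (fid + 0x10) >> did, and families 17h/18h use
		 * 200 MHz * fid / did.  For example, on family 10h a P-state
		 * with fid = 0x10 and did = 0 yields 100 * (0x10 + 0x10) =
		 * 3200 MHz.
		 */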
		switch (family) {
		case 0x11:
			hwpstate_set[i].freq = (100 * (fid + 0x08)) >> did;
			break;
		case 0x10:
		case 0x12:
		case 0x15:
		case 0x16:
			hwpstate_set[i].freq = (100 * (fid + 0x10)) >> did;
			break;
		case 0x17:
		case 0x18:
			did = AMD_17H_CUR_DID(msr);
			if (did == 0) {
				HWPSTATE_DEBUG(dev, "unexpected did: 0\n");
				did = 1;
			}
			fid = AMD_17H_CUR_FID(msr);
			hwpstate_set[i].freq = (200 * fid) / did;
			break;
		default:
			HWPSTATE_DEBUG(dev, "get_info_from_msr: %s family"
			    " 0x%02x CPUs are not supported yet\n",
			    cpu_vendor_id == CPU_VENDOR_HYGON ? "Hygon" : "AMD",
			    family);
			return (ENXIO);
		}
		hwpstate_set[i].pstate_id = i;
		/* The voltage calculation was removed; report unknown values. */
		hwpstate_set[i].volts = CPUFREQ_VAL_UNKNOWN;
		hwpstate_set[i].power = CPUFREQ_VAL_UNKNOWN;
		hwpstate_set[i].lat = CPUFREQ_VAL_UNKNOWN;
	}
	return (0);
}

static int
hwpstate_get_info_from_acpi_perf(device_t dev, device_t perf_dev)
{
	struct hwpstate_softc *sc;
	struct cf_setting *perf_set;
	struct hwpstate_setting *hwpstate_set;
	int count, error, i;

	perf_set = malloc(MAX_SETTINGS * sizeof(*perf_set), M_TEMP, M_NOWAIT);
	if (perf_set == NULL) {
		HWPSTATE_DEBUG(dev, "nomem\n");
		return (ENOMEM);
	}
	/*
	 * Fetch the settings from acpi_perf.
	 * At this point it is attached and has the INFO_ONLY flag.
	 */
	count = MAX_SETTINGS;
	error = CPUFREQ_DRV_SETTINGS(perf_dev, perf_set, &count);
	if (error) {
		HWPSTATE_DEBUG(dev, "error: CPUFREQ_DRV_SETTINGS.\n");
		goto out;
	}
	sc = device_get_softc(dev);
	sc->cfnum = count;
	hwpstate_set = sc->hwpstate_settings;
	for (i = 0; i < count; i++) {
		if (i == perf_set[i].spec[0]) {
			hwpstate_set[i].pstate_id = i;
			hwpstate_set[i].freq = perf_set[i].freq;
			hwpstate_set[i].volts = perf_set[i].volts;
			hwpstate_set[i].power = perf_set[i].power;
			hwpstate_set[i].lat = perf_set[i].lat;
		} else {
			HWPSTATE_DEBUG(dev, "ACPI _PSS object mismatch.\n");
			error = ENXIO;
			goto out;
		}
	}
out:
	if (perf_set)
		free(perf_set, M_TEMP);
	return (error);
}

static int
hwpstate_detach(device_t dev)
{

	hwpstate_goto_pstate(dev, 0);
	return (cpufreq_unregister(dev));
}

static int
hwpstate_shutdown(device_t dev)
{

	/* hwpstate_goto_pstate(dev, 0); */
	return (0);
}

static int
hwpstate_features(driver_t *driver, u_int *features)
{

	/* Notify the ACPI CPU that we support direct access to MSRs */
	*features = ACPI_CAP_PERF_MSRS;
	return (0);
}