xref: /freebsd/sys/dev/acpica/acpi_throttle.c (revision 685dc743dc3b5645e34836464128e1c0558b404b)
/*-
 * Copyright (c) 2003-2005 Nate Lawson (SDG)
 * Copyright (c) 2001 Michael Smith
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *	notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *	notice, this list of conditions and the following disclaimer in the
 *	documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include "opt_acpi.h"
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/rman.h>

#include <machine/bus.h>

#include <contrib/dev/acpica/include/acpi.h>

#include <dev/acpica/acpivar.h>
#include <dev/pci/pcivar.h>

#include "cpufreq_if.h"

/*
 * Throttling provides relative frequency control.  It involves modulating
 * the clock so that the CPU is active for only a fraction of the normal
 * clock cycle.  It does not change voltage and so is less efficient than
 * other mechanisms.  Since it is relative, it can be used in addition to
 * absolute cpufreq drivers.  We support the ACPI 2.0 specification.
 */
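/*
 * For example, a 50% duty cycle applied while the CPU's absolute setting is
 * 2.0 GHz yields roughly the performance of a 1.0 GHz part, at the same
 * voltage, and cpufreq(4) can combine such relative settings with those of
 * absolute drivers.
 */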

struct acpi_throttle_softc {
	device_t	 cpu_dev;
	ACPI_HANDLE	 cpu_handle;
	uint32_t	 cpu_p_blk;	/* ACPI P_BLK location */
	uint32_t	 cpu_p_blk_len;	/* P_BLK length (must be 6). */
	struct resource	*cpu_p_cnt;	/* Throttling control register */
	int		 cpu_p_type;	/* Resource type for cpu_p_cnt. */
	uint32_t	 cpu_thr_state;	/* Current throttle setting. */
};

#define THR_GET_REG(reg)					\
	(bus_space_read_4(rman_get_bustag((reg)),		\
			  rman_get_bushandle((reg)), 0))
#define THR_SET_REG(reg, val)					\
	(bus_space_write_4(rman_get_bustag((reg)),		\
			   rman_get_bushandle((reg)), 0, (val)))

/*
 * Speeds are stored in counts, from 1 to CPU_MAX_SPEED, and
 * reported to the user in hundredths of a percent.
 */
#define CPU_MAX_SPEED		(1 << cpu_duty_width)
#define CPU_SPEED_PERCENT(x)	((10000 * (x)) / CPU_MAX_SPEED)
#define CPU_SPEED_PRINTABLE(x)	(CPU_SPEED_PERCENT(x) / 10),	\
				(CPU_SPEED_PERCENT(x) % 10)
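/*
 * For example, a FADT DutyWidth of 3 gives CPU_MAX_SPEED == 8, i.e. eight
 * duty-cycle steps of 12.5% each; CPU_SPEED_PERCENT(4) == 5000, which is
 * reported as 50.00%.
 */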
#define CPU_P_CNT_THT_EN	(1<<4)
#define CPU_QUIRK_NO_THROTTLE	(1<<1)	/* Throttling is not usable. */

#define PCI_VENDOR_INTEL	0x8086
#define PCI_DEVICE_82371AB_3	0x7113	/* PIIX4 chipset for quirks. */
#define PCI_REVISION_A_STEP	0
#define PCI_REVISION_B_STEP	1

static uint32_t	cpu_duty_offset;	/* Offset in P_CNT of throttle val. */
static uint32_t	cpu_duty_width;		/* Bit width of throttle value. */
static int	thr_rid;		/* Driver-wide resource id. */
static int	thr_quirks;		/* Indicate any hardware bugs. */

static void	acpi_throttle_identify(driver_t *driver, device_t parent);
static int	acpi_throttle_probe(device_t dev);
static int	acpi_throttle_attach(device_t dev);
static int	acpi_throttle_evaluate(struct acpi_throttle_softc *sc);
static void	acpi_throttle_quirks(struct acpi_throttle_softc *sc);
static int	acpi_thr_settings(device_t dev, struct cf_setting *sets,
		    int *count);
static int	acpi_thr_set(device_t dev, const struct cf_setting *set);
static int	acpi_thr_get(device_t dev, struct cf_setting *set);
static int	acpi_thr_type(device_t dev, int *type);

static device_method_t acpi_throttle_methods[] = {
	/* Device interface */
	DEVMETHOD(device_identify,	acpi_throttle_identify),
	DEVMETHOD(device_probe,		acpi_throttle_probe),
	DEVMETHOD(device_attach,	acpi_throttle_attach),

	/* cpufreq interface */
	DEVMETHOD(cpufreq_drv_set,	acpi_thr_set),
	DEVMETHOD(cpufreq_drv_get,	acpi_thr_get),
	DEVMETHOD(cpufreq_drv_type,	acpi_thr_type),
	DEVMETHOD(cpufreq_drv_settings,	acpi_thr_settings),
	DEVMETHOD_END
};

static driver_t acpi_throttle_driver = {
	"acpi_throttle",
	acpi_throttle_methods,
	sizeof(struct acpi_throttle_softc),
};

DRIVER_MODULE(acpi_throttle, cpu, acpi_throttle_driver, 0, 0);

static void
acpi_throttle_identify(driver_t *driver, device_t parent)
{
	ACPI_BUFFER buf;
	ACPI_HANDLE handle;
	ACPI_OBJECT *obj;

	/* Make sure we're not being doubly invoked. */
	if (device_find_child(parent, "acpi_throttle", -1))
		return;

	/* Check for a valid duty width and parent CPU type. */
	handle = acpi_get_handle(parent);
	if (handle == NULL)
		return;
	if (AcpiGbl_FADT.DutyWidth == 0 ||
	    acpi_get_type(parent) != ACPI_TYPE_PROCESSOR)
		return;

	/*
	 * Add a child if there's a non-NULL P_BLK and correct length, or
	 * if the _PTC method is present.
	 */
	buf.Pointer = NULL;
	buf.Length = ACPI_ALLOCATE_BUFFER;
	if (ACPI_FAILURE(AcpiEvaluateObject(handle, NULL, NULL, &buf)))
		return;
	obj = (ACPI_OBJECT *)buf.Pointer;
	if ((obj->Processor.PblkAddress && obj->Processor.PblkLength >= 4) ||
	    ACPI_SUCCESS(AcpiEvaluateObject(handle, "_PTC", NULL, NULL))) {
		if (BUS_ADD_CHILD(parent, 0, "acpi_throttle",
		    device_get_unit(parent)) == NULL)
			device_printf(parent, "add throttle child failed\n");
	}
	AcpiOsFree(obj);
}

static int
acpi_throttle_probe(device_t dev)
{

	if (resource_disabled("acpi_throttle", 0))
		return (ENXIO);

	/*
	 * On i386 platforms at least, ACPI throttling is accomplished by
	 * the chipset modulating the STPCLK# pin based on the duty cycle.
	 * Since p4tcc uses the same mechanism (but internal to the CPU),
	 * we disable acpi_throttle when p4tcc is also present.
	 */
	if (device_find_child(device_get_parent(dev), "p4tcc", -1) &&
	    !resource_disabled("p4tcc", 0))
		return (ENXIO);

	device_set_desc(dev, "ACPI CPU Throttling");
	return (0);
}

static int
acpi_throttle_attach(device_t dev)
{
	struct acpi_throttle_softc *sc;
	struct cf_setting set;
	ACPI_BUFFER buf;
	ACPI_OBJECT *obj;
	ACPI_STATUS status;
	int error;

	sc = device_get_softc(dev);
	sc->cpu_dev = dev;
	sc->cpu_handle = acpi_get_handle(dev);

	buf.Pointer = NULL;
	buf.Length = ACPI_ALLOCATE_BUFFER;
	status = AcpiEvaluateObject(sc->cpu_handle, NULL, NULL, &buf);
	if (ACPI_FAILURE(status)) {
		device_printf(dev, "attach failed to get Processor obj - %s\n",
		    AcpiFormatException(status));
		return (ENXIO);
	}
	obj = (ACPI_OBJECT *)buf.Pointer;
	sc->cpu_p_blk = obj->Processor.PblkAddress;
	sc->cpu_p_blk_len = obj->Processor.PblkLength;
	AcpiOsFree(obj);

	/* If this is the first device probed, check for quirks. */
	if (device_get_unit(dev) == 0)
		acpi_throttle_quirks(sc);

	/* Attempt to attach the actual throttling register. */
	error = acpi_throttle_evaluate(sc);
	if (error)
		return (error);

	/*
	 * Set our initial frequency to the highest since some systems
	 * seem to boot with this at the lowest setting.
	 */
	set.freq = 10000;
	acpi_thr_set(dev, &set);

	/* Everything went ok, register with cpufreq(4). */
	cpufreq_register(dev);
	return (0);
}

static int
acpi_throttle_evaluate(struct acpi_throttle_softc *sc)
{
	uint32_t duty_end;
	ACPI_BUFFER buf;
	ACPI_OBJECT obj;
	ACPI_GENERIC_ADDRESS gas;
	ACPI_STATUS status;

	/* Get throttling parameters from the FADT.  0 means not supported. */
	if (device_get_unit(sc->cpu_dev) == 0) {
		cpu_duty_offset = AcpiGbl_FADT.DutyOffset;
		cpu_duty_width = AcpiGbl_FADT.DutyWidth;
	}
	if (cpu_duty_width == 0 || (thr_quirks & CPU_QUIRK_NO_THROTTLE) != 0)
		return (ENXIO);

	/* Validate the duty offset/width. */
	duty_end = cpu_duty_offset + cpu_duty_width - 1;
	if (duty_end > 31) {
		device_printf(sc->cpu_dev,
		    "CLK_VAL field overflows P_CNT register\n");
		return (ENXIO);
	}
	if (cpu_duty_offset <= 4 && duty_end >= 4) {
		device_printf(sc->cpu_dev,
		    "CLK_VAL field overlaps THT_EN bit\n");
		return (ENXIO);
	}
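	/*
	 * For example, a DutyOffset of 1 with a DutyWidth of 3 places
	 * CLK_VAL in bits 1-3 of P_CNT, clear of the THT_EN bit (bit 4),
	 * while a DutyOffset of 4 would overlap THT_EN and fail the check
	 * above.
	 */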

	/*
	 * Look for a P_CNT register via the ACPI 2.0 _PTC method.  If it is
	 * not present, fall back to using the processor's P_BLK to find
	 * the P_CNT register.
	 *
	 * Note that some systems seem to duplicate the P_BLK pointer
	 * across multiple CPUs, so not getting the resource is not fatal.
	 */
	buf.Pointer = &obj;
	buf.Length = sizeof(obj);
	status = AcpiEvaluateObject(sc->cpu_handle, "_PTC", NULL, &buf);
	if (ACPI_SUCCESS(status)) {
		if (obj.Buffer.Length < sizeof(ACPI_GENERIC_ADDRESS) + 3) {
			device_printf(sc->cpu_dev, "_PTC buffer too small\n");
			return (ENXIO);
		}
		memcpy(&gas, obj.Buffer.Pointer + 3, sizeof(gas));
		acpi_bus_alloc_gas(sc->cpu_dev, &sc->cpu_p_type, &thr_rid,
		    &gas, &sc->cpu_p_cnt, 0);
		if (sc->cpu_p_cnt != NULL && bootverbose) {
			device_printf(sc->cpu_dev, "P_CNT from _PTC %#jx\n",
			    gas.Address);
		}
	}

	/* If _PTC not present or other failure, try the P_BLK. */
	if (sc->cpu_p_cnt == NULL) {
		/*
		 * The spec says P_BLK must be 6 bytes long.  However, some
		 * systems use it to indicate a fractional set of features
		 * present so we take anything >= 4.
		 */
		if (sc->cpu_p_blk_len < 4)
			return (ENXIO);
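		/*
		 * P_CNT is the 4-byte control register at offset 0 of the
		 * P_BLK, so the block's base address can be used directly.
		 */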
		gas.Address = sc->cpu_p_blk;
		gas.SpaceId = ACPI_ADR_SPACE_SYSTEM_IO;
		gas.BitWidth = 32;
		acpi_bus_alloc_gas(sc->cpu_dev, &sc->cpu_p_type, &thr_rid,
		    &gas, &sc->cpu_p_cnt, 0);
		if (sc->cpu_p_cnt != NULL) {
			if (bootverbose)
				device_printf(sc->cpu_dev,
				    "P_CNT from P_BLK %#x\n", sc->cpu_p_blk);
		} else {
			device_printf(sc->cpu_dev, "failed to attach P_CNT\n");
			return (ENXIO);
		}
	}
	thr_rid++;

	return (0);
}

static void
acpi_throttle_quirks(struct acpi_throttle_softc *sc)
{
#ifdef __i386__
	device_t acpi_dev;

	/* Look for various quirks of the PIIX4 part. */
	acpi_dev = pci_find_device(PCI_VENDOR_INTEL, PCI_DEVICE_82371AB_3);
	if (acpi_dev) {
		switch (pci_get_revid(acpi_dev)) {
		/*
		 * Disable throttling control on PIIX4 A and B-step.
		 * See specification changes #13 ("Manual Throttle Duty Cycle")
		 * and #14 ("Enabling and Disabling Manual Throttle"), plus
		 * erratum #5 ("STPCLK# Deassertion Time") from the January
		 * 2002 PIIX4 specification update.  Note that few (if any)
		 * mobile systems ever used this part.
		 */
		case PCI_REVISION_A_STEP:
		case PCI_REVISION_B_STEP:
			thr_quirks |= CPU_QUIRK_NO_THROTTLE;
			break;
		default:
			break;
		}
	}
#endif
}

static int
acpi_thr_settings(device_t dev, struct cf_setting *sets, int *count)
{
	int i, speed;

	if (sets == NULL || count == NULL)
		return (EINVAL);
	if (*count < CPU_MAX_SPEED)
		return (E2BIG);

	/* Return a list of valid settings for this driver. */
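	/*
	 * The settings run from full speed down to the smallest duty step,
	 * e.g. 100.00%, 87.50%, ..., 12.50% when the duty width is 3.
	 */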
	memset(sets, CPUFREQ_VAL_UNKNOWN, sizeof(*sets) * CPU_MAX_SPEED);
	for (i = 0, speed = CPU_MAX_SPEED; speed != 0; i++, speed--) {
		sets[i].freq = CPU_SPEED_PERCENT(speed);
		sets[i].dev = dev;
	}
	*count = CPU_MAX_SPEED;

	return (0);
}

static int
acpi_thr_set(device_t dev, const struct cf_setting *set)
{
	struct acpi_throttle_softc *sc;
	uint32_t clk_val, p_cnt, speed;

	if (set == NULL)
		return (EINVAL);
	sc = device_get_softc(dev);

	/*
	 * Validate that the requested state converts to a duty cycle that
	 * is an integer from [1 .. CPU_MAX_SPEED].
	 */
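	/*
	 * For example, with CPU_MAX_SPEED == 8 a request of 5000 (50.00%)
	 * maps to a duty count of 4, while 3000 (30.00%) does not divide
	 * evenly into eighths and is rejected.
	 */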
	speed = set->freq * CPU_MAX_SPEED / 10000;
	if (speed * 10000 != set->freq * CPU_MAX_SPEED ||
	    speed < 1 || speed > CPU_MAX_SPEED)
		return (EINVAL);

	/* If we're at this setting, don't bother applying it again. */
	if (speed == sc->cpu_thr_state)
		return (0);

	/* Get the current P_CNT value and disable throttling */
	p_cnt = THR_GET_REG(sc->cpu_p_cnt);
	p_cnt &= ~CPU_P_CNT_THT_EN;
	THR_SET_REG(sc->cpu_p_cnt, p_cnt);
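	/*
	 * The duty value is updated below with throttling disabled; THT_EN
	 * is turned back on only when running below full speed.
	 */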

	/* If we're at maximum speed, that's all */
	if (speed < CPU_MAX_SPEED) {
		/* Mask the old CLK_VAL off and OR in the new value */
		clk_val = (CPU_MAX_SPEED - 1) << cpu_duty_offset;
		p_cnt &= ~clk_val;
		p_cnt |= (speed << cpu_duty_offset);

		/* Write the new P_CNT value and then enable throttling */
		THR_SET_REG(sc->cpu_p_cnt, p_cnt);
		p_cnt |= CPU_P_CNT_THT_EN;
		THR_SET_REG(sc->cpu_p_cnt, p_cnt);
	}
	sc->cpu_thr_state = speed;

	return (0);
}

static int
acpi_thr_get(device_t dev, struct cf_setting *set)
{
	struct acpi_throttle_softc *sc;
	uint32_t p_cnt, clk_val;

	if (set == NULL)
		return (EINVAL);
	sc = device_get_softc(dev);

	/* Get the current throttling setting from P_CNT. */
	p_cnt = THR_GET_REG(sc->cpu_p_cnt);
	clk_val = (p_cnt >> cpu_duty_offset) & (CPU_MAX_SPEED - 1);
	sc->cpu_thr_state = clk_val;

	memset(set, CPUFREQ_VAL_UNKNOWN, sizeof(*set));
	set->freq = CPU_SPEED_PERCENT(clk_val);
	set->dev = dev;

	return (0);
}

static int
acpi_thr_type(device_t dev, int *type)
{

	if (type == NULL)
		return (EINVAL);

	*type = CPUFREQ_TYPE_RELATIVE;
	return (0);
}