1 /*-
2  * Copyright (c) 2004-2007 Nate Lawson (SDG)
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  */
26 
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
29 
30 #include <sys/param.h>
31 #include <sys/bus.h>
32 #include <sys/cpu.h>
33 #include <sys/eventhandler.h>
34 #include <sys/kernel.h>
35 #include <sys/lock.h>
36 #include <sys/malloc.h>
37 #include <sys/module.h>
38 #include <sys/proc.h>
39 #include <sys/queue.h>
40 #include <sys/sbuf.h>
41 #include <sys/sched.h>
42 #include <sys/smp.h>
43 #include <sys/sysctl.h>
44 #include <sys/systm.h>
45 #include <sys/sx.h>
46 #include <sys/timetc.h>
47 #include <sys/taskqueue.h>
48 
49 #include "cpufreq_if.h"
50 
51 /*
52  * Common CPU frequency glue code.  Drivers for specific hardware can
53  * attach this interface to allow users to get/set the CPU frequency.
54  */
55 
56 /*
57  * Number of levels we can handle.  Levels are synthesized from settings
58  * so for M settings and N drivers, there may be M*N levels.
59  */
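/*
 * For example, one driver offering 8 absolute settings combined with a
 * second driver offering 8 relative settings could synthesize up to 64
 * levels.
 */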
60 #define CF_MAX_LEVELS	64
61 
62 struct cf_saved_freq {
63 	struct cf_level			level;
64 	int				priority;
65 	SLIST_ENTRY(cf_saved_freq)	link;
66 };
67 
68 struct cpufreq_softc {
69 	struct sx			lock;
70 	struct cf_level			curr_level;
71 	int				curr_priority;
72 	SLIST_HEAD(, cf_saved_freq)	saved_freq;
73 	struct cf_level_lst		all_levels;
74 	int				all_count;
75 	int				max_mhz;
76 	device_t			dev;
77 	struct sysctl_ctx_list		sysctl_ctx;
78 	struct task			startup_task;
79 };
80 
81 struct cf_setting_array {
82 	struct cf_setting		sets[MAX_SETTINGS];
83 	int				count;
84 	TAILQ_ENTRY(cf_setting_array)	link;
85 };
86 
87 TAILQ_HEAD(cf_setting_lst, cf_setting_array);
88 
89 #define CF_MTX_INIT(x)		sx_init((x), "cpufreq lock")
90 #define CF_MTX_LOCK(x)		sx_xlock((x))
91 #define CF_MTX_UNLOCK(x)	sx_xunlock((x))
92 #define CF_MTX_ASSERT(x)	sx_assert((x), SX_XLOCKED)
93 
94 #define CF_DEBUG(msg...)	do {		\
95 	if (cf_verbose)				\
96 		printf("cpufreq: " msg);	\
97 	} while (0)
98 
99 static int	cpufreq_attach(device_t dev);
100 static void	cpufreq_startup_task(void *ctx, int pending);
101 static int	cpufreq_detach(device_t dev);
102 static int	cf_set_method(device_t dev, const struct cf_level *level,
103 		    int priority);
104 static int	cf_get_method(device_t dev, struct cf_level *level);
105 static int	cf_levels_method(device_t dev, struct cf_level *levels,
106 		    int *count);
107 static int	cpufreq_insert_abs(struct cpufreq_softc *sc,
108 		    struct cf_setting *sets, int count);
109 static int	cpufreq_expand_set(struct cpufreq_softc *sc,
110 		    struct cf_setting_array *set_arr);
111 static struct cf_level *cpufreq_dup_set(struct cpufreq_softc *sc,
112 		    struct cf_level *dup, struct cf_setting *set);
113 static int	cpufreq_curr_sysctl(SYSCTL_HANDLER_ARGS);
114 static int	cpufreq_levels_sysctl(SYSCTL_HANDLER_ARGS);
115 static int	cpufreq_settings_sysctl(SYSCTL_HANDLER_ARGS);
116 
117 static device_method_t cpufreq_methods[] = {
118 	DEVMETHOD(device_probe,		bus_generic_probe),
119 	DEVMETHOD(device_attach,	cpufreq_attach),
120 	DEVMETHOD(device_detach,	cpufreq_detach),
121 
122 	DEVMETHOD(cpufreq_set,		cf_set_method),
123 	DEVMETHOD(cpufreq_get,		cf_get_method),
124 	DEVMETHOD(cpufreq_levels,	cf_levels_method),
125 	{0, 0}
126 };
127 static driver_t cpufreq_driver = {
128 	"cpufreq", cpufreq_methods, sizeof(struct cpufreq_softc)
129 };
130 static devclass_t cpufreq_dc;
131 DRIVER_MODULE(cpufreq, cpu, cpufreq_driver, cpufreq_dc, 0, 0);
132 
133 static int		cf_lowest_freq;
134 static int		cf_verbose;
135 TUNABLE_INT("debug.cpufreq.lowest", &cf_lowest_freq);
136 TUNABLE_INT("debug.cpufreq.verbose", &cf_verbose);
137 SYSCTL_NODE(_debug, OID_AUTO, cpufreq, CTLFLAG_RD, NULL, "cpufreq debugging");
138 SYSCTL_INT(_debug_cpufreq, OID_AUTO, lowest, CTLFLAG_RW, &cf_lowest_freq, 1,
139     "Don't provide levels below this frequency.");
140 SYSCTL_INT(_debug_cpufreq, OID_AUTO, verbose, CTLFLAG_RW, &cf_verbose, 1,
141     "Print verbose debugging messages");
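
/*
 * Both knobs are also available as loader tunables; for example, a
 * /boot/loader.conf sketch (illustrative values):
 *
 *	debug.cpufreq.verbose="1"
 *	debug.cpufreq.lowest="200"
 */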
142 
143 static int
144 cpufreq_attach(device_t dev)
145 {
146 	struct cpufreq_softc *sc;
147 	device_t parent;
148 	int numdevs;
149 
150 	CF_DEBUG("initializing %s\n", device_get_nameunit(dev));
151 	sc = device_get_softc(dev);
152 	parent = device_get_parent(dev);
153 	sc->dev = dev;
154 	sysctl_ctx_init(&sc->sysctl_ctx);
155 	TAILQ_INIT(&sc->all_levels);
156 	CF_MTX_INIT(&sc->lock);
157 	sc->curr_level.total_set.freq = CPUFREQ_VAL_UNKNOWN;
158 	SLIST_INIT(&sc->saved_freq);
159 	sc->max_mhz = CPUFREQ_VAL_UNKNOWN;
160 
161 	/*
162 	 * Only initialize one set of sysctls for all CPUs.  In the future,
163 	 * if multiple CPUs can have different settings, we can move these
164 	 * sysctls to be under every CPU instead of just the first one.
165 	 */
166 	numdevs = devclass_get_count(cpufreq_dc);
167 	if (numdevs > 1)
168 		return (0);
169 
170 	CF_DEBUG("initializing one-time data for %s\n",
171 	    device_get_nameunit(dev));
172 	SYSCTL_ADD_PROC(&sc->sysctl_ctx,
173 	    SYSCTL_CHILDREN(device_get_sysctl_tree(parent)),
174 	    OID_AUTO, "freq", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
175 	    cpufreq_curr_sysctl, "I", "Current CPU frequency");
176 	SYSCTL_ADD_PROC(&sc->sysctl_ctx,
177 	    SYSCTL_CHILDREN(device_get_sysctl_tree(parent)),
178 	    OID_AUTO, "freq_levels", CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
179 	    cpufreq_levels_sysctl, "A", "CPU frequency levels");
180 
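	/*
	 * These handlers live under the first CPU's sysctl tree (normally
	 * dev.cpu.0).  From userland they would typically be used with
	 * something like the following (illustrative):
	 *
	 *	sysctl dev.cpu.0.freq
	 *	sysctl dev.cpu.0.freq=1200
	 *	sysctl dev.cpu.0.freq_levels
	 */
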
181 	/*
182 	 * Queue a one-shot broadcast that levels have changed.
183 	 * It will run once the system has completed booting.
184 	 */
185 	TASK_INIT(&sc->startup_task, 0, cpufreq_startup_task, dev);
186 	taskqueue_enqueue(taskqueue_thread, &sc->startup_task);
187 
188 	return (0);
189 }
190 
191 /* Handle any work to be done for all drivers that attached during boot. */
192 static void
193 cpufreq_startup_task(void *ctx, int pending)
194 {
195 
196 	cpufreq_settings_changed((device_t)ctx);
197 }
198 
199 static int
200 cpufreq_detach(device_t dev)
201 {
202 	struct cpufreq_softc *sc;
203 	struct cf_saved_freq *saved_freq;
204 	int numdevs;
205 
206 	CF_DEBUG("shutdown %s\n", device_get_nameunit(dev));
207 	sc = device_get_softc(dev);
208 	sysctl_ctx_free(&sc->sysctl_ctx);
209 
210 	while ((saved_freq = SLIST_FIRST(&sc->saved_freq)) != NULL) {
211 		SLIST_REMOVE_HEAD(&sc->saved_freq, link);
212 		free(saved_freq, M_TEMP);
213 	}
214 
215 	/* Only clean up these resources when the last device is detaching. */
216 	numdevs = devclass_get_count(cpufreq_dc);
217 	if (numdevs == 1) {
218 		CF_DEBUG("final shutdown for %s\n", device_get_nameunit(dev));
219 	}
220 
221 	return (0);
222 }
223 
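/*
 * Switch to the given level: program the absolute setting via its driver
 * first, then all relative settings, binding to the target CPU around each
 * driver call.  A request with a lower priority than the active one is
 * rejected; when a higher-priority request wins, the previous level is
 * saved so that a later call with a NULL level can restore it.
 */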
224 static int
225 cf_set_method(device_t dev, const struct cf_level *level, int priority)
226 {
227 	struct cpufreq_softc *sc;
228 	const struct cf_setting *set;
229 	struct cf_saved_freq *saved_freq, *curr_freq;
230 	struct pcpu *pc;
231 	int error, i;
232 
233 	sc = device_get_softc(dev);
234 	error = 0;
235 	set = NULL;
236 	saved_freq = NULL;
237 
238 	/* We are going to change levels so notify the pre-change handler. */
239 	EVENTHANDLER_INVOKE(cpufreq_pre_change, level, &error);
240 	if (error != 0) {
241 		EVENTHANDLER_INVOKE(cpufreq_post_change, level, error);
242 		return (error);
243 	}
244 
245 	CF_MTX_LOCK(&sc->lock);
246 
247 #ifdef SMP
248 	/*
249 	 * If still booting and secondary CPUs not started yet, don't allow
250 	 * changing the frequency until they're online.  This is because we
251 	 * can't switch to them using sched_bind() and thus we'd only be
252 	 * switching the main CPU.  XXXTODO: Need to think more about how to
253 	 * handle having different CPUs at different frequencies.
254 	 */
255 	if (mp_ncpus > 1 && !smp_active) {
256 		device_printf(dev, "rejecting change, SMP not started yet\n");
257 		error = ENXIO;
258 		goto out;
259 	}
260 #endif /* SMP */
261 
262 	/*
263 	 * If the requested level has a lower priority, don't allow
264 	 * the new level right now.
265 	 */
266 	if (priority < sc->curr_priority) {
267 		CF_DEBUG("ignoring, new prio %d less than curr prio %d\n",
268 		    priority, sc->curr_priority);
269 		error = EPERM;
270 		goto out;
271 	}
272 
273 	/*
274 	 * If the caller didn't specify a level and one is saved, prepare to
275 	 * restore the saved level.  If none has been saved, return an error.
276 	 */
277 	if (level == NULL) {
278 		saved_freq = SLIST_FIRST(&sc->saved_freq);
279 		if (saved_freq == NULL) {
280 			CF_DEBUG("NULL level, no saved level\n");
281 			error = ENXIO;
282 			goto out;
283 		}
284 		level = &saved_freq->level;
285 		priority = saved_freq->priority;
286 		CF_DEBUG("restoring saved level, freq %d prio %d\n",
287 		    level->total_set.freq, priority);
288 	}
289 
290 	/* Reject levels that are below our specified threshold. */
291 	if (level->total_set.freq < cf_lowest_freq) {
292 		CF_DEBUG("rejecting freq %d, less than %d limit\n",
293 		    level->total_set.freq, cf_lowest_freq);
294 		error = EINVAL;
295 		goto out;
296 	}
297 
298 	/* If already at this level, just return. */
299 	if (CPUFREQ_CMP(sc->curr_level.total_set.freq, level->total_set.freq)) {
300 		CF_DEBUG("skipping freq %d, same as current level %d\n",
301 		    level->total_set.freq, sc->curr_level.total_set.freq);
302 		goto skip;
303 	}
304 
305 	/* First, set the absolute frequency via its driver. */
306 	set = &level->abs_set;
307 	if (set->dev) {
308 		if (!device_is_attached(set->dev)) {
309 			error = ENXIO;
310 			goto out;
311 		}
312 
313 		/* Bind to the target CPU before switching. */
314 		pc = cpu_get_pcpu(set->dev);
315 		thread_lock(curthread);
316 		sched_bind(curthread, pc->pc_cpuid);
317 		thread_unlock(curthread);
318 		CF_DEBUG("setting abs freq %d on %s (cpu %d)\n", set->freq,
319 		    device_get_nameunit(set->dev), PCPU_GET(cpuid));
320 		error = CPUFREQ_DRV_SET(set->dev, set);
321 		thread_lock(curthread);
322 		sched_unbind(curthread);
323 		thread_unlock(curthread);
324 		if (error) {
325 			goto out;
326 		}
327 	}
328 
329 	/* Next, set any/all relative frequencies via their drivers. */
330 	for (i = 0; i < level->rel_count; i++) {
331 		set = &level->rel_set[i];
332 		if (!device_is_attached(set->dev)) {
333 			error = ENXIO;
334 			goto out;
335 		}
336 
337 		/* Bind to the target CPU before switching. */
338 		pc = cpu_get_pcpu(set->dev);
339 		thread_lock(curthread);
340 		sched_bind(curthread, pc->pc_cpuid);
341 		thread_unlock(curthread);
342 		CF_DEBUG("setting rel freq %d on %s (cpu %d)\n", set->freq,
343 		    device_get_nameunit(set->dev), PCPU_GET(cpuid));
344 		error = CPUFREQ_DRV_SET(set->dev, set);
345 		thread_lock(curthread);
346 		sched_unbind(curthread);
347 		thread_unlock(curthread);
348 		if (error) {
349 			/* XXX Back out any successful setting? */
350 			goto out;
351 		}
352 	}
353 
354 skip:
355 	/*
356 	 * Before recording the current level, check if we're going to a
357 	 * higher priority.  If so, save the previous level and priority.
358 	 */
359 	if (sc->curr_level.total_set.freq != CPUFREQ_VAL_UNKNOWN &&
360 	    priority > sc->curr_priority) {
361 		CF_DEBUG("saving level, freq %d prio %d\n",
362 		    sc->curr_level.total_set.freq, sc->curr_priority);
363 		curr_freq = malloc(sizeof(*curr_freq), M_TEMP, M_NOWAIT);
364 		if (curr_freq == NULL) {
365 			error = ENOMEM;
366 			goto out;
367 		}
368 		curr_freq->level = sc->curr_level;
369 		curr_freq->priority = sc->curr_priority;
370 		SLIST_INSERT_HEAD(&sc->saved_freq, curr_freq, link);
371 	}
372 	sc->curr_level = *level;
373 	sc->curr_priority = priority;
374 
375 	/* If we were restoring a saved state, reset it to "unused". */
376 	if (saved_freq != NULL) {
377 		CF_DEBUG("resetting saved level\n");
378 		sc->curr_level.total_set.freq = CPUFREQ_VAL_UNKNOWN;
379 		SLIST_REMOVE_HEAD(&sc->saved_freq, link);
380 		free(saved_freq, M_TEMP);
381 	}
382 
383 out:
384 	CF_MTX_UNLOCK(&sc->lock);
385 
386 	/*
387 	 * We changed levels (or attempted to) so notify the post-change
388 	 * handler of new frequency or error.
389 	 */
390 	EVENTHANDLER_INVOKE(cpufreq_post_change, level, error);
391 	if (error && set)
392 		device_printf(set->dev, "set freq failed, err %d\n", error);
393 
394 	return (error);
395 }
396 
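/*
 * Return the current level.  Use the cached value if it is known; otherwise
 * ask each driver for its current setting and match that against the list
 * of levels, falling back to an estimate from cpu_est_clockrate().
 */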
397 static int
398 cf_get_method(device_t dev, struct cf_level *level)
399 {
400 	struct cpufreq_softc *sc;
401 	struct cf_level *levels;
402 	struct cf_setting *curr_set, set;
403 	struct pcpu *pc;
404 	device_t *devs;
405 	int count, error, i, n, numdevs;
406 	uint64_t rate;
407 
408 	sc = device_get_softc(dev);
409 	error = 0;
410 	levels = NULL;
411 
412 	/* If we already know the current frequency, we're done. */
413 	CF_MTX_LOCK(&sc->lock);
414 	curr_set = &sc->curr_level.total_set;
415 	if (curr_set->freq != CPUFREQ_VAL_UNKNOWN) {
416 		CF_DEBUG("get returning known freq %d\n", curr_set->freq);
417 		goto out;
418 	}
419 	CF_MTX_UNLOCK(&sc->lock);
420 
421 	/*
422 	 * We need to figure out the current level.  Loop through every
423 	 * driver, getting the current setting.  Then, attempt to get a best
424 	 * match of settings against each level.
425 	 */
426 	count = CF_MAX_LEVELS;
427 	levels = malloc(count * sizeof(*levels), M_TEMP, M_NOWAIT);
428 	if (levels == NULL)
429 		return (ENOMEM);
430 	error = CPUFREQ_LEVELS(sc->dev, levels, &count);
431 	if (error) {
432 		if (error == E2BIG)
433 			printf("cpufreq: need to increase CF_MAX_LEVELS\n");
434 		free(levels, M_TEMP);
435 		return (error);
436 	}
437 	error = device_get_children(device_get_parent(dev), &devs, &numdevs);
438 	if (error) {
439 		free(levels, M_TEMP);
440 		return (error);
441 	}
442 
443 	/*
444 	 * Reacquire the lock and search for the given level.
445 	 *
446 	 * XXX Note: this is not quite right since we really need to go
447 	 * through each level and compare both absolute and relative
448 	 * settings for each driver in the system before making a match.
449 	 * The estimation code below catches this case though.
450 	 */
451 	CF_MTX_LOCK(&sc->lock);
452 	for (n = 0; n < numdevs && curr_set->freq == CPUFREQ_VAL_UNKNOWN; n++) {
453 		if (!device_is_attached(devs[n]))
454 			continue;
455 		if (CPUFREQ_DRV_GET(devs[n], &set) != 0)
456 			continue;
457 		for (i = 0; i < count; i++) {
458 			if (CPUFREQ_CMP(set.freq, levels[i].total_set.freq)) {
459 				sc->curr_level = levels[i];
460 				break;
461 			}
462 		}
463 	}
464 	free(devs, M_TEMP);
465 	if (curr_set->freq != CPUFREQ_VAL_UNKNOWN) {
466 		CF_DEBUG("get matched freq %d from drivers\n", curr_set->freq);
467 		goto out;
468 	}
469 
470 	/*
471 	 * We couldn't find an exact match, so attempt to estimate and then
472 	 * match against a level.
473 	 */
474 	pc = cpu_get_pcpu(dev);
475 	if (pc == NULL) {
476 		error = ENXIO;
477 		goto out;
478 	}
479 	cpu_est_clockrate(pc->pc_cpuid, &rate);
480 	rate /= 1000000;
481 	for (i = 0; i < count; i++) {
482 		if (CPUFREQ_CMP(rate, levels[i].total_set.freq)) {
483 			sc->curr_level = levels[i];
484 			CF_DEBUG("get estimated freq %d\n", curr_set->freq);
485 			goto out;
486 		}
487 	}
488 	error = ENXIO;
489 
490 out:
491 	if (error == 0)
492 		*level = sc->curr_level;
493 
494 	CF_MTX_UNLOCK(&sc->lock);
495 	if (levels)
496 		free(levels, M_TEMP);
497 	return (error);
498 }
499 
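/*
 * Build the list of levels for this CPU: collect settings from all attached
 * cpufreq drivers, insert the absolute settings in sorted order, and then
 * expand them with each group of relative settings.
 */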
500 static int
501 cf_levels_method(device_t dev, struct cf_level *levels, int *count)
502 {
503 	struct cf_setting_array *set_arr;
504 	struct cf_setting_lst rel_sets;
505 	struct cpufreq_softc *sc;
506 	struct cf_level *lev;
507 	struct cf_setting *sets;
508 	struct pcpu *pc;
509 	device_t *devs;
510 	int error, i, numdevs, set_count, type;
511 	uint64_t rate;
512 
513 	if (levels == NULL || count == NULL)
514 		return (EINVAL);
515 
516 	TAILQ_INIT(&rel_sets);
517 	sc = device_get_softc(dev);
518 	error = device_get_children(device_get_parent(dev), &devs, &numdevs);
519 	if (error)
520 		return (error);
521 	sets = malloc(MAX_SETTINGS * sizeof(*sets), M_TEMP, M_NOWAIT);
522 	if (sets == NULL) {
523 		free(devs, M_TEMP);
524 		return (ENOMEM);
525 	}
526 
527 	/* Get settings from all cpufreq drivers. */
528 	CF_MTX_LOCK(&sc->lock);
529 	for (i = 0; i < numdevs; i++) {
530 		/* Skip devices that aren't ready. */
531 		if (!device_is_attached(devs[i]))
532 			continue;
533 
534 		/*
535 		 * Get settings, skipping drivers that offer no settings or
536 		 * provide settings for informational purposes only.
537 		 */
538 		error = CPUFREQ_DRV_TYPE(devs[i], &type);
539 		if (error || (type & CPUFREQ_FLAG_INFO_ONLY)) {
540 			if (error == 0) {
541 				CF_DEBUG("skipping info-only driver %s\n",
542 				    device_get_nameunit(devs[i]));
543 			}
544 			continue;
545 		}
546 		set_count = MAX_SETTINGS;
547 		error = CPUFREQ_DRV_SETTINGS(devs[i], sets, &set_count);
548 		if (error || set_count == 0)
549 			continue;
550 
551 		/* Add the settings to our absolute/relative lists. */
552 		switch (type & CPUFREQ_TYPE_MASK) {
553 		case CPUFREQ_TYPE_ABSOLUTE:
554 			error = cpufreq_insert_abs(sc, sets, set_count);
555 			break;
556 		case CPUFREQ_TYPE_RELATIVE:
557 			CF_DEBUG("adding %d relative settings\n", set_count);
558 			set_arr = malloc(sizeof(*set_arr), M_TEMP, M_NOWAIT);
559 			if (set_arr == NULL) {
560 				error = ENOMEM;
561 				goto out;
562 			}
563 			bcopy(sets, set_arr->sets, set_count * sizeof(*sets));
564 			set_arr->count = set_count;
565 			TAILQ_INSERT_TAIL(&rel_sets, set_arr, link);
566 			break;
567 		default:
568 			error = EINVAL;
569 		}
570 		if (error)
571 			goto out;
572 	}
573 
574 	/*
575 	 * If there are no absolute levels, create a fake one at 100%.  We
576 	 * then cache the clockrate for later use as our base frequency.
577 	 *
578 	 * XXX This assumes that the first time through, if we only have
579 	 * relative drivers, the CPU is currently running at 100%.
580 	 */
581 	if (TAILQ_EMPTY(&sc->all_levels)) {
582 		if (sc->max_mhz == CPUFREQ_VAL_UNKNOWN) {
583 			pc = cpu_get_pcpu(dev);
584 			cpu_est_clockrate(pc->pc_cpuid, &rate);
585 			sc->max_mhz = rate / 1000000;
586 		}
587 		memset(&sets[0], CPUFREQ_VAL_UNKNOWN, sizeof(*sets));
588 		sets[0].freq = sc->max_mhz;
589 		sets[0].dev = NULL;
590 		error = cpufreq_insert_abs(sc, sets, 1);
591 		if (error)
592 			goto out;
593 	}
594 
595 	/* Create a combined list of absolute + relative levels. */
596 	TAILQ_FOREACH(set_arr, &rel_sets, link)
597 		cpufreq_expand_set(sc, set_arr);
598 
599 	/* If the caller doesn't have enough space, return the actual count. */
600 	if (sc->all_count > *count) {
601 		*count = sc->all_count;
602 		error = E2BIG;
603 		goto out;
604 	}
605 
606 	/* Finally, output the list of levels. */
607 	i = 0;
608 	TAILQ_FOREACH(lev, &sc->all_levels, link) {
609 		/*
610 		 * Skip levels that are too close in frequency to the
611 		 * previous level.  Some systems report bogus duplicate
612 		 * settings (e.g., from acpi_perf).
613 		 */
614 		if (i > 0 && CPUFREQ_CMP(lev->total_set.freq,
615 		    levels[i - 1].total_set.freq)) {
616 			sc->all_count--;
617 			continue;
618 		}
619 
620 		/* Skip levels that have a frequency that is too low. */
621 		if (lev->total_set.freq < cf_lowest_freq) {
622 			sc->all_count--;
623 			continue;
624 		}
625 
626 		levels[i] = *lev;
627 		i++;
628 	}
629 	*count = sc->all_count;
630 	error = 0;
631 
632 out:
633 	/* Clear all levels since we regenerate them each time. */
634 	while ((lev = TAILQ_FIRST(&sc->all_levels)) != NULL) {
635 		TAILQ_REMOVE(&sc->all_levels, lev, link);
636 		free(lev, M_TEMP);
637 	}
638 	sc->all_count = 0;
639 
640 	CF_MTX_UNLOCK(&sc->lock);
641 	while ((set_arr = TAILQ_FIRST(&rel_sets)) != NULL) {
642 		TAILQ_REMOVE(&rel_sets, set_arr, link);
643 		free(set_arr, M_TEMP);
644 	}
645 	free(devs, M_TEMP);
646 	free(sets, M_TEMP);
647 	return (error);
648 }
649 
650 /*
651  * Create levels for an array of absolute settings and insert them in
652  * sorted order in the specified list.
653  */
654 static int
655 cpufreq_insert_abs(struct cpufreq_softc *sc, struct cf_setting *sets,
656     int count)
657 {
658 	struct cf_level_lst *list;
659 	struct cf_level *level, *search;
660 	int i;
661 
662 	CF_MTX_ASSERT(&sc->lock);
663 
664 	list = &sc->all_levels;
665 	for (i = 0; i < count; i++) {
666 		level = malloc(sizeof(*level), M_TEMP, M_NOWAIT | M_ZERO);
667 		if (level == NULL)
668 			return (ENOMEM);
669 		level->abs_set = sets[i];
670 		level->total_set = sets[i];
671 		level->total_set.dev = NULL;
672 		sc->all_count++;
673 
674 		if (TAILQ_EMPTY(list)) {
675 			CF_DEBUG("adding abs setting %d at head\n",
676 			    sets[i].freq);
677 			TAILQ_INSERT_HEAD(list, level, link);
678 			continue;
679 		}
680 
681 		TAILQ_FOREACH_REVERSE(search, list, cf_level_lst, link) {
682 			if (sets[i].freq <= search->total_set.freq) {
683 				CF_DEBUG("adding abs setting %d after %d\n",
684 				    sets[i].freq, search->total_set.freq);
685 				TAILQ_INSERT_AFTER(list, search, level, link);
686 				break;
687 			}
688 		}
689 	}
690 	return (0);
691 }
692 
693 /*
694  * Expand a group of relative settings, creating derived levels from them.
695  */
696 static int
697 cpufreq_expand_set(struct cpufreq_softc *sc, struct cf_setting_array *set_arr)
698 {
699 	struct cf_level *fill, *search;
700 	struct cf_setting *set;
701 	int i;
702 
703 	CF_MTX_ASSERT(&sc->lock);
704 
705 	/*
706 	 * Walk the set of all existing levels in reverse.  This is so we
707 	 * create derived states from the lowest absolute settings first
708 	 * and discard duplicates created from higher absolute settings.
709 	 * For instance, a level of 50 MHz derived from 100 MHz + 50% is
710 	 * preferable to 200 MHz + 25% because absolute settings are more
711 	 * efficient since they often change the voltage as well.
712 	 */
713 	TAILQ_FOREACH_REVERSE(search, &sc->all_levels, cf_level_lst, link) {
714 		/* Add each setting to the level, duplicating if necessary. */
715 		for (i = 0; i < set_arr->count; i++) {
716 			set = &set_arr->sets[i];
717 
718 			/*
719 			 * If this setting is less than 100%, split the level
720 			 * into two and add this setting to the new level.
721 			 */
722 			fill = search;
723 			if (set->freq < 10000) {
724 				fill = cpufreq_dup_set(sc, search, set);
725 
726 				/*
727 				 * The new level was a duplicate of an existing
728 				 * level, or its absolute setting was too high,
729 				 * so we freed it.  For example, we discard a
730 				 * derived level of 1000 MHz/25% if a level
731 				 * of 500 MHz/100% already exists.
732 				 */
733 				if (fill == NULL)
734 					break;
735 			}
736 
737 			/* Add this setting to the existing or new level. */
738 			KASSERT(fill->rel_count < MAX_SETTINGS,
739 			    ("cpufreq: too many relative drivers (%d)",
740 			    MAX_SETTINGS));
741 			fill->rel_set[fill->rel_count] = *set;
742 			fill->rel_count++;
743 			CF_DEBUG(
744 			"expand set added rel setting %d%% to %d level\n",
745 			    set->freq / 100, fill->total_set.freq);
746 		}
747 	}
748 
749 	return (0);
750 }
751 
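/*
 * Duplicate an existing level and scale its total frequency and power by
 * the given relative setting, inserting the result in sorted order.  Return
 * NULL (after freeing the copy) if the derived level duplicates an existing
 * one or would rely on a higher absolute setting than necessary.
 */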
752 static struct cf_level *
753 cpufreq_dup_set(struct cpufreq_softc *sc, struct cf_level *dup,
754     struct cf_setting *set)
755 {
756 	struct cf_level_lst *list;
757 	struct cf_level *fill, *itr;
758 	struct cf_setting *fill_set, *itr_set;
759 	int i;
760 
761 	CF_MTX_ASSERT(&sc->lock);
762 
763 	/*
764 	 * Create a new level, copy it from the old one, and update the
765 	 * total frequency and power by the percentage specified in the
766 	 * relative setting.
767 	 */
768 	fill = malloc(sizeof(*fill), M_TEMP, M_NOWAIT);
769 	if (fill == NULL)
770 		return (NULL);
771 	*fill = *dup;
772 	fill_set = &fill->total_set;
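	/*
	 * Relative settings are expressed in units of 0.01%, so 10000 means
	 * 100%; e.g. a 5000 (50%) setting applied to a 2000 MHz level yields
	 * a 1000 MHz derived level.
	 */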
773 	fill_set->freq =
774 	    ((uint64_t)fill_set->freq * set->freq) / 10000;
775 	if (fill_set->power != CPUFREQ_VAL_UNKNOWN) {
776 		fill_set->power = ((uint64_t)fill_set->power * set->freq)
777 		    / 10000;
778 	}
779 	if (set->lat != CPUFREQ_VAL_UNKNOWN) {
780 		if (fill_set->lat != CPUFREQ_VAL_UNKNOWN)
781 			fill_set->lat += set->lat;
782 		else
783 			fill_set->lat = set->lat;
784 	}
785 	CF_DEBUG("dup set considering derived setting %d\n", fill_set->freq);
786 
787 	/*
788 	 * If we copied an old level that we already modified (say, at 100%),
789 	 * we need to remove that setting before adding this one.  Since we
790 	 * process each setting array in order, we know any settings for this
791 	 * driver will be found at the end.
792 	 */
793 	for (i = fill->rel_count; i != 0; i--) {
794 		if (fill->rel_set[i - 1].dev != set->dev)
795 			break;
796 		CF_DEBUG("removed last relative driver: %s\n",
797 		    device_get_nameunit(set->dev));
798 		fill->rel_count--;
799 	}
800 
801 	/*
802 	 * Insert the new level in sorted order.  If it is a duplicate of an
803 	 * existing level (1) or has an absolute setting higher than the
804 	 * existing level (2), do not add it.  We can do this since any such
805 	 * level is guaranteed to use less power.  For example (1), a level
806 	 * with one absolute setting of 800 MHz uses less power than one made
807 	 * of an absolute setting of 1600 MHz and a relative setting at 50%.
808 	 * Also for example (2), a level of 800 MHz/75% is preferable to
809 	 * 1600 MHz/25% even though the latter has a lower total frequency.
810 	 */
811 	list = &sc->all_levels;
812 	KASSERT(!TAILQ_EMPTY(list), ("all levels list empty in dup set"));
813 	TAILQ_FOREACH_REVERSE(itr, list, cf_level_lst, link) {
814 		itr_set = &itr->total_set;
815 		if (CPUFREQ_CMP(fill_set->freq, itr_set->freq)) {
816 			CF_DEBUG("dup set rejecting %d (dupe)\n",
817 			    fill_set->freq);
818 			itr = NULL;
819 			break;
820 		} else if (fill_set->freq < itr_set->freq) {
821 			if (fill->abs_set.freq <= itr->abs_set.freq) {
822 				CF_DEBUG(
823 			"dup done, inserting new level %d after %d\n",
824 				    fill_set->freq, itr_set->freq);
825 				TAILQ_INSERT_AFTER(list, itr, fill, link);
826 				sc->all_count++;
827 			} else {
828 				CF_DEBUG("dup set rejecting %d (abs too big)\n",
829 				    fill_set->freq);
830 				itr = NULL;
831 			}
832 			break;
833 		}
834 	}
835 
836 	/* We didn't find a good place for this new level so free it. */
837 	if (itr == NULL) {
838 		CF_DEBUG("dup set freeing new level %d (not optimal)\n",
839 		    fill_set->freq);
840 		free(fill, M_TEMP);
841 		fill = NULL;
842 	}
843 
844 	return (fill);
845 }
846 
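/*
 * Handler for the "freq" sysctl.  Reads report the current total frequency;
 * writes look up the level matching the requested frequency and apply it to
 * every cpufreq device at user priority.
 */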
847 static int
848 cpufreq_curr_sysctl(SYSCTL_HANDLER_ARGS)
849 {
850 	struct cpufreq_softc *sc;
851 	struct cf_level *levels;
852 	int count, devcount, error, freq, i, n;
853 	device_t *devs;
854 
855 	devs = NULL;
856 	sc = oidp->oid_arg1;
857 	levels = malloc(CF_MAX_LEVELS * sizeof(*levels), M_TEMP, M_NOWAIT);
858 	if (levels == NULL)
859 		return (ENOMEM);
860 
861 	error = CPUFREQ_GET(sc->dev, &levels[0]);
862 	if (error)
863 		goto out;
864 	freq = levels[0].total_set.freq;
865 	error = sysctl_handle_int(oidp, &freq, 0, req);
866 	if (error != 0 || req->newptr == NULL)
867 		goto out;
868 
869 	/*
870 	 * While we only call cpufreq_get() on one device (assuming all
871 	 * CPUs have equal levels), we call cpufreq_set() on all CPUs.
872 	 * This is needed for some MP systems.
873 	 */
874 	error = devclass_get_devices(cpufreq_dc, &devs, &devcount);
875 	if (error)
876 		goto out;
877 	for (n = 0; n < devcount; n++) {
878 		count = CF_MAX_LEVELS;
879 		error = CPUFREQ_LEVELS(devs[n], levels, &count);
880 		if (error) {
881 			if (error == E2BIG)
882 				printf(
883 			"cpufreq: need to increase CF_MAX_LEVELS\n");
884 			break;
885 		}
886 		for (i = 0; i < count; i++) {
887 			if (CPUFREQ_CMP(levels[i].total_set.freq, freq)) {
888 				error = CPUFREQ_SET(devs[n], &levels[i],
889 				    CPUFREQ_PRIO_USER);
890 				break;
891 			}
892 		}
893 		if (i == count) {
894 			error = EINVAL;
895 			break;
896 		}
897 	}
898 
899 out:
900 	if (devs)
901 		free(devs, M_TEMP);
902 	if (levels)
903 		free(levels, M_TEMP);
904 	return (error);
905 }
906 
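/*
 * Handler for the "freq_levels" sysctl: print each synthesized level as a
 * "frequency/power" pair.
 */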
907 static int
908 cpufreq_levels_sysctl(SYSCTL_HANDLER_ARGS)
909 {
910 	struct cpufreq_softc *sc;
911 	struct cf_level *levels;
912 	struct cf_setting *set;
913 	struct sbuf sb;
914 	int count, error, i;
915 
916 	sc = oidp->oid_arg1;
917 	sbuf_new(&sb, NULL, 128, SBUF_AUTOEXTEND);
918 
919 	/* Get settings from the device and generate the output string. */
920 	count = CF_MAX_LEVELS;
921 	levels = malloc(count * sizeof(*levels), M_TEMP, M_NOWAIT);
922 	if (levels == NULL)
923 		return (ENOMEM);
924 	error = CPUFREQ_LEVELS(sc->dev, levels, &count);
925 	if (error) {
926 		if (error == E2BIG)
927 			printf("cpufreq: need to increase CF_MAX_LEVELS\n");
928 		goto out;
929 	}
930 	if (count) {
931 		for (i = 0; i < count; i++) {
932 			set = &levels[i].total_set;
933 			sbuf_printf(&sb, "%d/%d ", set->freq, set->power);
934 		}
935 	} else
936 		sbuf_cpy(&sb, "0");
937 	sbuf_trim(&sb);
938 	sbuf_finish(&sb);
939 	error = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
940 
941 out:
942 	free(levels, M_TEMP);
943 	sbuf_delete(&sb);
944 	return (error);
945 }
946 
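/*
 * Handler for a driver's "freq_settings" sysctl: print the raw settings
 * offered by that single driver as "frequency/power" pairs.
 */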
947 static int
948 cpufreq_settings_sysctl(SYSCTL_HANDLER_ARGS)
949 {
950 	device_t dev;
951 	struct cf_setting *sets;
952 	struct sbuf sb;
953 	int error, i, set_count;
954 
955 	dev = oidp->oid_arg1;
956 	sbuf_new(&sb, NULL, 128, SBUF_AUTOEXTEND);
957 
958 	/* Get settings from the device and generate the output string. */
959 	set_count = MAX_SETTINGS;
960 	sets = malloc(set_count * sizeof(*sets), M_TEMP, M_NOWAIT);
961 	if (sets == NULL)
962 		return (ENOMEM);
963 	error = CPUFREQ_DRV_SETTINGS(dev, sets, &set_count);
964 	if (error)
965 		goto out;
966 	if (set_count) {
967 		for (i = 0; i < set_count; i++)
968 			sbuf_printf(&sb, "%d/%d ", sets[i].freq, sets[i].power);
969 	} else
970 		sbuf_cpy(&sb, "0");
971 	sbuf_trim(&sb);
972 	sbuf_finish(&sb);
973 	error = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
974 
975 out:
976 	free(sets, M_TEMP);
977 	sbuf_delete(&sb);
978 	return (error);
979 }
980 
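/*
 * Called by a hardware-specific driver, typically from its attach routine,
 * once it can answer the cpufreq_if methods used here (CPUFREQ_DRV_TYPE,
 * CPUFREQ_DRV_SETTINGS, CPUFREQ_DRV_SET, CPUFREQ_DRV_GET).  Ensures a
 * cpufreq control device exists under the driver's CPU and adds the
 * per-driver "freq_settings" sysctl.
 */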
981 int
982 cpufreq_register(device_t dev)
983 {
984 	struct cpufreq_softc *sc;
985 	device_t cf_dev, cpu_dev;
986 
987 	/* Add a sysctl to get each driver's settings separately. */
988 	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
989 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
990 	    OID_AUTO, "freq_settings", CTLTYPE_STRING | CTLFLAG_RD, dev, 0,
991 	    cpufreq_settings_sysctl, "A", "CPU frequency driver settings");
992 
993 	/*
994 	 * Add only one cpufreq device to each CPU.  Currently, all CPUs
995 	 * must offer the same levels and be switched at the same time.
996 	 */
997 	cpu_dev = device_get_parent(dev);
998 	if ((cf_dev = device_find_child(cpu_dev, "cpufreq", -1))) {
999 		sc = device_get_softc(cf_dev);
1000 		sc->max_mhz = CPUFREQ_VAL_UNKNOWN;
1001 		return (0);
1002 	}
1003 
1004 	/* Add the child device and possibly sysctls. */
1005 	cf_dev = BUS_ADD_CHILD(cpu_dev, 0, "cpufreq", -1);
1006 	if (cf_dev == NULL)
1007 		return (ENOMEM);
1008 	device_quiet(cf_dev);
1009 
1010 	return (device_probe_and_attach(cf_dev));
1011 }
1012 
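/*
 * Called when a hardware-specific driver is going away.  If it was the last
 * driver offering settings for this CPU, delete the cpufreq control device
 * as well.
 */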
1013 int
1014 cpufreq_unregister(device_t dev)
1015 {
1016 	device_t cf_dev, *devs;
1017 	int cfcount, devcount, error, i, type;
1018 
1019 	/*
1020 	 * If this is the last cpufreq child device, remove the control
1021 	 * device as well.  We identify cpufreq children by calling a method
1022 	 * they support.
1023 	 */
1024 	error = device_get_children(device_get_parent(dev), &devs, &devcount);
1025 	if (error)
1026 		return (error);
1027 	cf_dev = device_find_child(device_get_parent(dev), "cpufreq", -1);
1028 	if (cf_dev == NULL) {
1029 		device_printf(dev,
1030 	"warning: cpufreq_unregister called with no cpufreq device active\n");
1031 		return (0);
1032 	}
1033 	cfcount = 0;
1034 	for (i = 0; i < devcount; i++) {
1035 		if (!device_is_attached(devs[i]))
1036 			continue;
1037 		if (CPUFREQ_DRV_TYPE(devs[i], &type) == 0)
1038 			cfcount++;
1039 	}
1040 	if (cfcount <= 1)
1041 		device_delete_child(device_get_parent(cf_dev), cf_dev);
1042 	free(devs, M_TEMP);
1043 
1044 	return (0);
1045 }
1046 
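/*
 * Notify listeners, via the cpufreq_levels_changed eventhandler, that the
 * available levels may have changed, identifying the CPU by the unit number
 * of the caller's parent device.
 */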
1047 int
1048 cpufreq_settings_changed(device_t dev)
1049 {
1050 
1051 	EVENTHANDLER_INVOKE(cpufreq_levels_changed,
1052 	    device_get_unit(device_get_parent(dev)));
1053 	return (0);
1054 }
1055