xref: /freebsd/sys/kern/kern_cpu.c (revision 076b94438c7d42c1b4661ed1e12e3b12ca69361a)
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2004-2007 Nate Lawson (SDG)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/sx.h>
#include <sys/timetc.h>
#include <sys/taskqueue.h>

#include "cpufreq_if.h"

/*
 * Common CPU frequency glue code.  Drivers for specific hardware can
 * attach this interface to allow users to get/set the CPU frequency.
 */
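
/*
 * Example usage from userland (assuming the usual dev.cpu.N sysctl tree;
 * exact frequencies depend on the drivers attached):
 *
 *	# sysctl dev.cpu.0.freq_levels		(available levels, freq/power)
 *	# sysctl dev.cpu.0.freq=1200		(request the closest level)
 *	# sysctl debug.cpufreq.verbose=1	(enable CF_DEBUG() output)
 */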

/*
 * Number of levels we can handle.  Levels are synthesized from settings
 * so for M settings and N drivers, there may be M*N levels.
 */
#define CF_MAX_LEVELS	256

struct cf_saved_freq {
	struct cf_level			level;
	int				priority;
	SLIST_ENTRY(cf_saved_freq)	link;
};

struct cpufreq_softc {
	struct sx			lock;
	struct cf_level			curr_level;
	int				curr_priority;
	SLIST_HEAD(, cf_saved_freq)	saved_freq;
	struct cf_level_lst		all_levels;
	int				all_count;
	int				max_mhz;
	device_t			dev;
	struct sysctl_ctx_list		sysctl_ctx;
	struct task			startup_task;
	struct cf_level			*levels_buf;
};

struct cf_setting_array {
	struct cf_setting		sets[MAX_SETTINGS];
	int				count;
	TAILQ_ENTRY(cf_setting_array)	link;
};

TAILQ_HEAD(cf_setting_lst, cf_setting_array);

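/*
 * The softc is protected by an sx lock, wrapped in the CF_MTX_* macros
 * below, presumably so that driver methods that may sleep can be invoked
 * while it is held.
 */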
#define CF_MTX_INIT(x)		sx_init((x), "cpufreq lock")
#define CF_MTX_LOCK(x)		sx_xlock((x))
#define CF_MTX_UNLOCK(x)	sx_xunlock((x))
#define CF_MTX_ASSERT(x)	sx_assert((x), SX_XLOCKED)

#define CF_DEBUG(msg...)	do {		\
	if (cf_verbose)				\
		printf("cpufreq: " msg);	\
	} while (0)

static int	cpufreq_attach(device_t dev);
static void	cpufreq_startup_task(void *ctx, int pending);
static int	cpufreq_detach(device_t dev);
static int	cf_set_method(device_t dev, const struct cf_level *level,
		    int priority);
static int	cf_get_method(device_t dev, struct cf_level *level);
static int	cf_levels_method(device_t dev, struct cf_level *levels,
		    int *count);
static int	cpufreq_insert_abs(struct cpufreq_softc *sc,
		    struct cf_setting *sets, int count);
static int	cpufreq_expand_set(struct cpufreq_softc *sc,
		    struct cf_setting_array *set_arr);
static struct cf_level *cpufreq_dup_set(struct cpufreq_softc *sc,
		    struct cf_level *dup, struct cf_setting *set);
static int	cpufreq_curr_sysctl(SYSCTL_HANDLER_ARGS);
static int	cpufreq_levels_sysctl(SYSCTL_HANDLER_ARGS);
static int	cpufreq_settings_sysctl(SYSCTL_HANDLER_ARGS);

static device_method_t cpufreq_methods[] = {
	DEVMETHOD(device_probe,		bus_generic_probe),
	DEVMETHOD(device_attach,	cpufreq_attach),
	DEVMETHOD(device_detach,	cpufreq_detach),

	DEVMETHOD(cpufreq_set,		cf_set_method),
	DEVMETHOD(cpufreq_get,		cf_get_method),
	DEVMETHOD(cpufreq_levels,	cf_levels_method),
	{0, 0}
};
static driver_t cpufreq_driver = {
	"cpufreq", cpufreq_methods, sizeof(struct cpufreq_softc)
};
static devclass_t cpufreq_dc;
DRIVER_MODULE(cpufreq, cpu, cpufreq_driver, cpufreq_dc, 0, 0);

static int		cf_lowest_freq;
static int		cf_verbose;
static SYSCTL_NODE(_debug, OID_AUTO, cpufreq, CTLFLAG_RD, NULL,
    "cpufreq debugging");
SYSCTL_INT(_debug_cpufreq, OID_AUTO, lowest, CTLFLAG_RWTUN, &cf_lowest_freq, 1,
    "Don't provide levels below this frequency.");
SYSCTL_INT(_debug_cpufreq, OID_AUTO, verbose, CTLFLAG_RWTUN, &cf_verbose, 1,
    "Print verbose debugging messages");

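/*
 * Attach the cpufreq glue device to its parent CPU device.  Record a
 * nominal or estimated maximum frequency and, for the first instance only,
 * create the shared "freq" and "freq_levels" sysctls.
 */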
static int
cpufreq_attach(device_t dev)
{
	struct cpufreq_softc *sc;
	struct pcpu *pc;
	device_t parent;
	uint64_t rate;
	int numdevs;

	CF_DEBUG("initializing %s\n", device_get_nameunit(dev));
	sc = device_get_softc(dev);
	parent = device_get_parent(dev);
	sc->dev = dev;
	sysctl_ctx_init(&sc->sysctl_ctx);
	TAILQ_INIT(&sc->all_levels);
	CF_MTX_INIT(&sc->lock);
	sc->curr_level.total_set.freq = CPUFREQ_VAL_UNKNOWN;
	SLIST_INIT(&sc->saved_freq);
	/* Try to get nominal CPU freq to use it as maximum later if needed */
	sc->max_mhz = cpu_get_nominal_mhz(dev);
	/* If that fails, try to measure the current rate */
	if (sc->max_mhz <= 0) {
		pc = cpu_get_pcpu(dev);
		if (cpu_est_clockrate(pc->pc_cpuid, &rate) == 0)
			sc->max_mhz = rate / 1000000;
		else
			sc->max_mhz = CPUFREQ_VAL_UNKNOWN;
	}

	/*
	 * Only initialize one set of sysctls for all CPUs.  In the future,
	 * if multiple CPUs can have different settings, we can move these
	 * sysctls to be under every CPU instead of just the first one.
	 */
	numdevs = devclass_get_count(cpufreq_dc);
	if (numdevs > 1)
		return (0);

	CF_DEBUG("initializing one-time data for %s\n",
	    device_get_nameunit(dev));
	sc->levels_buf = malloc(CF_MAX_LEVELS * sizeof(*sc->levels_buf),
	    M_DEVBUF, M_WAITOK);
	SYSCTL_ADD_PROC(&sc->sysctl_ctx,
	    SYSCTL_CHILDREN(device_get_sysctl_tree(parent)),
	    OID_AUTO, "freq", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
	    cpufreq_curr_sysctl, "I", "Current CPU frequency");
	SYSCTL_ADD_PROC(&sc->sysctl_ctx,
	    SYSCTL_CHILDREN(device_get_sysctl_tree(parent)),
	    OID_AUTO, "freq_levels", CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
	    cpufreq_levels_sysctl, "A", "CPU frequency levels");

	/*
	 * Queue a one-shot broadcast that levels have changed.
	 * It will run once the system has completed booting.
	 */
	TASK_INIT(&sc->startup_task, 0, cpufreq_startup_task, dev);
	taskqueue_enqueue(taskqueue_thread, &sc->startup_task);

	return (0);
}

/* Handle any work to be done for all drivers that attached during boot. */
static void
cpufreq_startup_task(void *ctx, int pending)
{

	cpufreq_settings_changed((device_t)ctx);
}

static int
cpufreq_detach(device_t dev)
{
	struct cpufreq_softc *sc;
	struct cf_saved_freq *saved_freq;
	int numdevs;

	CF_DEBUG("shutdown %s\n", device_get_nameunit(dev));
	sc = device_get_softc(dev);
	sysctl_ctx_free(&sc->sysctl_ctx);

	while ((saved_freq = SLIST_FIRST(&sc->saved_freq)) != NULL) {
		SLIST_REMOVE_HEAD(&sc->saved_freq, link);
		free(saved_freq, M_TEMP);
	}

	/* Only clean up these resources when the last device is detaching. */
	numdevs = devclass_get_count(cpufreq_dc);
	if (numdevs == 1) {
		CF_DEBUG("final shutdown for %s\n", device_get_nameunit(dev));
		free(sc->levels_buf, M_DEVBUF);
	}

	return (0);
}

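/*
 * Set the current frequency level.  The absolute setting is applied first,
 * then any relative settings, with the calling thread temporarily bound to
 * the target CPU for each driver call.  A NULL level requests restoring the
 * most recently saved level.
 */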
static int
cf_set_method(device_t dev, const struct cf_level *level, int priority)
{
	struct cpufreq_softc *sc;
	const struct cf_setting *set;
	struct cf_saved_freq *saved_freq, *curr_freq;
	struct pcpu *pc;
	int error, i;
	u_char pri;

	sc = device_get_softc(dev);
	error = 0;
	set = NULL;
	saved_freq = NULL;

	/* We are going to change levels so notify the pre-change handler. */
	EVENTHANDLER_INVOKE(cpufreq_pre_change, level, &error);
	if (error != 0) {
		EVENTHANDLER_INVOKE(cpufreq_post_change, level, error);
		return (error);
	}

	CF_MTX_LOCK(&sc->lock);

#ifdef SMP
#ifdef EARLY_AP_STARTUP
	MPASS(mp_ncpus == 1 || smp_started);
#else
	/*
	 * If still booting and secondary CPUs not started yet, don't allow
	 * changing the frequency until they're online.  This is because we
	 * can't switch to them using sched_bind() and thus we'd only be
	 * switching the main CPU.  XXXTODO: Need to think more about how to
	 * handle having different CPUs at different frequencies.
	 */
	if (mp_ncpus > 1 && !smp_started) {
		device_printf(dev, "rejecting change, SMP not started yet\n");
		error = ENXIO;
		goto out;
	}
#endif
#endif /* SMP */

	/*
	 * If the requested level has a lower priority, don't allow
	 * the new level right now.
	 */
	if (priority < sc->curr_priority) {
		CF_DEBUG("ignoring, new prio %d less than curr prio %d\n",
		    priority, sc->curr_priority);
		error = EPERM;
		goto out;
	}

	/*
	 * If the caller didn't specify a level and one is saved, prepare to
	 * restore the saved level.  If none has been saved, return an error.
	 */
	if (level == NULL) {
		saved_freq = SLIST_FIRST(&sc->saved_freq);
		if (saved_freq == NULL) {
			CF_DEBUG("NULL level, no saved level\n");
			error = ENXIO;
			goto out;
		}
		level = &saved_freq->level;
		priority = saved_freq->priority;
		CF_DEBUG("restoring saved level, freq %d prio %d\n",
		    level->total_set.freq, priority);
	}

	/* Reject levels that are below our specified threshold. */
	if (level->total_set.freq < cf_lowest_freq) {
		CF_DEBUG("rejecting freq %d, less than %d limit\n",
		    level->total_set.freq, cf_lowest_freq);
		error = EINVAL;
		goto out;
	}

	/* If already at this level, just return. */
	if (sc->curr_level.total_set.freq == level->total_set.freq) {
		CF_DEBUG("skipping freq %d, same as current level %d\n",
		    level->total_set.freq, sc->curr_level.total_set.freq);
		goto skip;
	}

	/* First, set the absolute frequency via its driver. */
	set = &level->abs_set;
	if (set->dev) {
		if (!device_is_attached(set->dev)) {
			error = ENXIO;
			goto out;
		}

		/* Bind to the target CPU before switching. */
		pc = cpu_get_pcpu(set->dev);
		thread_lock(curthread);
		pri = curthread->td_priority;
		sched_prio(curthread, PRI_MIN);
		sched_bind(curthread, pc->pc_cpuid);
		thread_unlock(curthread);
		CF_DEBUG("setting abs freq %d on %s (cpu %d)\n", set->freq,
		    device_get_nameunit(set->dev), PCPU_GET(cpuid));
		error = CPUFREQ_DRV_SET(set->dev, set);
		thread_lock(curthread);
		sched_unbind(curthread);
		sched_prio(curthread, pri);
		thread_unlock(curthread);
		if (error) {
			goto out;
		}
	}

	/* Next, set any/all relative frequencies via their drivers. */
	for (i = 0; i < level->rel_count; i++) {
		set = &level->rel_set[i];
		if (!device_is_attached(set->dev)) {
			error = ENXIO;
			goto out;
		}

		/* Bind to the target CPU before switching. */
		pc = cpu_get_pcpu(set->dev);
		thread_lock(curthread);
		pri = curthread->td_priority;
		sched_prio(curthread, PRI_MIN);
		sched_bind(curthread, pc->pc_cpuid);
		thread_unlock(curthread);
		CF_DEBUG("setting rel freq %d on %s (cpu %d)\n", set->freq,
		    device_get_nameunit(set->dev), PCPU_GET(cpuid));
		error = CPUFREQ_DRV_SET(set->dev, set);
		thread_lock(curthread);
		sched_unbind(curthread);
		sched_prio(curthread, pri);
		thread_unlock(curthread);
		if (error) {
			/* XXX Back out any successful setting? */
			goto out;
		}
	}

skip:
	/*
	 * Before recording the current level, check if we're going to a
	 * higher priority.  If so, save the previous level and priority.
	 */
	if (sc->curr_level.total_set.freq != CPUFREQ_VAL_UNKNOWN &&
	    priority > sc->curr_priority) {
		CF_DEBUG("saving level, freq %d prio %d\n",
		    sc->curr_level.total_set.freq, sc->curr_priority);
		curr_freq = malloc(sizeof(*curr_freq), M_TEMP, M_NOWAIT);
		if (curr_freq == NULL) {
			error = ENOMEM;
			goto out;
		}
		curr_freq->level = sc->curr_level;
		curr_freq->priority = sc->curr_priority;
		SLIST_INSERT_HEAD(&sc->saved_freq, curr_freq, link);
	}
	sc->curr_level = *level;
	sc->curr_priority = priority;

	/* If we were restoring a saved state, reset it to "unused". */
	if (saved_freq != NULL) {
		CF_DEBUG("resetting saved level\n");
		sc->curr_level.total_set.freq = CPUFREQ_VAL_UNKNOWN;
		SLIST_REMOVE_HEAD(&sc->saved_freq, link);
		free(saved_freq, M_TEMP);
	}

out:
	CF_MTX_UNLOCK(&sc->lock);

	/*
	 * We changed levels (or attempted to) so notify the post-change
	 * handler of new frequency or error.
	 */
	EVENTHANDLER_INVOKE(cpufreq_post_change, level, error);
	if (error && set)
		device_printf(set->dev, "set freq failed, err %d\n", error);

	return (error);
}

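/*
 * Return the current level.  If it is not already known, query each driver
 * for its current setting and, failing an exact match against the level
 * list, estimate the clock rate and pick the closest level.
 */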
static int
cf_get_method(device_t dev, struct cf_level *level)
{
	struct cpufreq_softc *sc;
	struct cf_level *levels;
	struct cf_setting *curr_set, set;
	struct pcpu *pc;
	device_t *devs;
	int bdiff, count, diff, error, i, n, numdevs;
	uint64_t rate;

	sc = device_get_softc(dev);
	error = 0;
	levels = NULL;

	/* If we already know the current frequency, we're done. */
	CF_MTX_LOCK(&sc->lock);
	curr_set = &sc->curr_level.total_set;
	if (curr_set->freq != CPUFREQ_VAL_UNKNOWN) {
		CF_DEBUG("get returning known freq %d\n", curr_set->freq);
		goto out;
	}
	CF_MTX_UNLOCK(&sc->lock);

	/*
	 * We need to figure out the current level.  Loop through every
	 * driver, getting the current setting.  Then, attempt to get a best
	 * match of settings against each level.
	 */
	count = CF_MAX_LEVELS;
	levels = malloc(count * sizeof(*levels), M_TEMP, M_NOWAIT);
	if (levels == NULL)
		return (ENOMEM);
	error = CPUFREQ_LEVELS(sc->dev, levels, &count);
	if (error) {
		if (error == E2BIG)
			printf("cpufreq: need to increase CF_MAX_LEVELS\n");
		free(levels, M_TEMP);
		return (error);
	}
	error = device_get_children(device_get_parent(dev), &devs, &numdevs);
	if (error) {
		free(levels, M_TEMP);
		return (error);
	}

	/*
	 * Reacquire the lock and search for the given level.
	 *
	 * XXX Note: this is not quite right since we really need to go
	 * through each level and compare both absolute and relative
	 * settings for each driver in the system before making a match.
	 * The estimation code below catches this case though.
	 */
	CF_MTX_LOCK(&sc->lock);
	for (n = 0; n < numdevs && curr_set->freq == CPUFREQ_VAL_UNKNOWN; n++) {
		if (!device_is_attached(devs[n]))
			continue;
		if (CPUFREQ_DRV_GET(devs[n], &set) != 0)
			continue;
		for (i = 0; i < count; i++) {
			if (set.freq == levels[i].total_set.freq) {
				sc->curr_level = levels[i];
				break;
			}
		}
	}
	free(devs, M_TEMP);
	if (curr_set->freq != CPUFREQ_VAL_UNKNOWN) {
		CF_DEBUG("get matched freq %d from drivers\n", curr_set->freq);
		goto out;
	}

	/*
	 * We couldn't find an exact match, so attempt to estimate and then
	 * match against a level.
	 */
	pc = cpu_get_pcpu(dev);
	if (pc == NULL) {
		error = ENXIO;
		goto out;
	}
	cpu_est_clockrate(pc->pc_cpuid, &rate);
	rate /= 1000000;
	bdiff = 1 << 30;
	for (i = 0; i < count; i++) {
		diff = abs(levels[i].total_set.freq - rate);
		if (diff < bdiff) {
			bdiff = diff;
			sc->curr_level = levels[i];
		}
	}
	CF_DEBUG("get estimated freq %d\n", curr_set->freq);

out:
	if (error == 0)
		*level = sc->curr_level;

	CF_MTX_UNLOCK(&sc->lock);
	if (levels)
		free(levels, M_TEMP);
	return (error);
}

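/*
 * Build the list of levels for the caller: gather settings from all
 * attached cpufreq drivers, insert absolute settings as levels, then expand
 * them with any relative settings.  The level list is regenerated on every
 * call.
 */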
static int
cf_levels_method(device_t dev, struct cf_level *levels, int *count)
{
	struct cf_setting_array *set_arr;
	struct cf_setting_lst rel_sets;
	struct cpufreq_softc *sc;
	struct cf_level *lev;
	struct cf_setting *sets;
	struct pcpu *pc;
	device_t *devs;
	int error, i, numdevs, set_count, type;
	uint64_t rate;

	if (levels == NULL || count == NULL)
		return (EINVAL);

	TAILQ_INIT(&rel_sets);
	sc = device_get_softc(dev);
	error = device_get_children(device_get_parent(dev), &devs, &numdevs);
	if (error)
		return (error);
	sets = malloc(MAX_SETTINGS * sizeof(*sets), M_TEMP, M_NOWAIT);
	if (sets == NULL) {
		free(devs, M_TEMP);
		return (ENOMEM);
	}

	/* Get settings from all cpufreq drivers. */
	CF_MTX_LOCK(&sc->lock);
	for (i = 0; i < numdevs; i++) {
		/* Skip devices that aren't ready. */
		if (!device_is_attached(devs[i]))
			continue;

		/*
		 * Get settings, skipping drivers that offer no settings or
		 * provide settings for informational purposes only.
		 */
		error = CPUFREQ_DRV_TYPE(devs[i], &type);
		if (error || (type & CPUFREQ_FLAG_INFO_ONLY)) {
			if (error == 0) {
				CF_DEBUG("skipping info-only driver %s\n",
				    device_get_nameunit(devs[i]));
			}
			continue;
		}
		set_count = MAX_SETTINGS;
		error = CPUFREQ_DRV_SETTINGS(devs[i], sets, &set_count);
		if (error || set_count == 0)
			continue;

		/* Add the settings to our absolute/relative lists. */
		switch (type & CPUFREQ_TYPE_MASK) {
		case CPUFREQ_TYPE_ABSOLUTE:
			error = cpufreq_insert_abs(sc, sets, set_count);
			break;
		case CPUFREQ_TYPE_RELATIVE:
			CF_DEBUG("adding %d relative settings\n", set_count);
			set_arr = malloc(sizeof(*set_arr), M_TEMP, M_NOWAIT);
			if (set_arr == NULL) {
				error = ENOMEM;
				goto out;
			}
			bcopy(sets, set_arr->sets, set_count * sizeof(*sets));
			set_arr->count = set_count;
			TAILQ_INSERT_TAIL(&rel_sets, set_arr, link);
			break;
		default:
			error = EINVAL;
		}
		if (error)
			goto out;
	}

	/*
	 * If there are no absolute levels, create a fake one at 100%.  We
	 * then cache the clockrate for later use as our base frequency.
	 */
	if (TAILQ_EMPTY(&sc->all_levels)) {
		if (sc->max_mhz == CPUFREQ_VAL_UNKNOWN) {
			sc->max_mhz = cpu_get_nominal_mhz(dev);
			/*
			 * If the CPU can't report a rate for 100%, hope
			 * the CPU is running at its nominal rate right now,
			 * and use that instead.
			 */
			if (sc->max_mhz <= 0) {
				pc = cpu_get_pcpu(dev);
				cpu_est_clockrate(pc->pc_cpuid, &rate);
				sc->max_mhz = rate / 1000000;
			}
		}
		memset(&sets[0], CPUFREQ_VAL_UNKNOWN, sizeof(*sets));
		sets[0].freq = sc->max_mhz;
		sets[0].dev = NULL;
		error = cpufreq_insert_abs(sc, sets, 1);
		if (error)
			goto out;
	}

	/* Create a combined list of absolute + relative levels. */
	TAILQ_FOREACH(set_arr, &rel_sets, link)
		cpufreq_expand_set(sc, set_arr);

	/* If the caller doesn't have enough space, return the actual count. */
	if (sc->all_count > *count) {
		*count = sc->all_count;
		error = E2BIG;
		goto out;
	}

	/* Finally, output the list of levels. */
	i = 0;
	TAILQ_FOREACH(lev, &sc->all_levels, link) {

		/* Skip levels that have a frequency that is too low. */
		if (lev->total_set.freq < cf_lowest_freq) {
			sc->all_count--;
			continue;
		}

		levels[i] = *lev;
		i++;
	}
	*count = sc->all_count;
	error = 0;

out:
	/* Clear all levels since we regenerate them each time. */
	while ((lev = TAILQ_FIRST(&sc->all_levels)) != NULL) {
		TAILQ_REMOVE(&sc->all_levels, lev, link);
		free(lev, M_TEMP);
	}
	sc->all_count = 0;

	CF_MTX_UNLOCK(&sc->lock);
	while ((set_arr = TAILQ_FIRST(&rel_sets)) != NULL) {
		TAILQ_REMOVE(&rel_sets, set_arr, link);
		free(set_arr, M_TEMP);
	}
	free(devs, M_TEMP);
	free(sets, M_TEMP);
	return (error);
}

/*
 * Create levels for an array of absolute settings and insert them in
 * sorted order in the specified list.
 */
static int
cpufreq_insert_abs(struct cpufreq_softc *sc, struct cf_setting *sets,
    int count)
{
	struct cf_level_lst *list;
	struct cf_level *level, *search;
	int i, inserted;

	CF_MTX_ASSERT(&sc->lock);

	list = &sc->all_levels;
	for (i = 0; i < count; i++) {
		level = malloc(sizeof(*level), M_TEMP, M_NOWAIT | M_ZERO);
		if (level == NULL)
			return (ENOMEM);
		level->abs_set = sets[i];
		level->total_set = sets[i];
		level->total_set.dev = NULL;
		sc->all_count++;
		inserted = 0;

		if (TAILQ_EMPTY(list)) {
			CF_DEBUG("adding abs setting %d at head\n",
			    sets[i].freq);
			TAILQ_INSERT_HEAD(list, level, link);
			continue;
		}

		TAILQ_FOREACH_REVERSE(search, list, cf_level_lst, link)
			if (sets[i].freq <= search->total_set.freq) {
				CF_DEBUG("adding abs setting %d after %d\n",
				    sets[i].freq, search->total_set.freq);
				TAILQ_INSERT_AFTER(list, search, level, link);
				inserted = 1;
				break;
			}

		if (inserted == 0) {
			TAILQ_FOREACH(search, list, link)
				if (sets[i].freq >= search->total_set.freq) {
					CF_DEBUG("adding abs setting %d before %d\n",
					    sets[i].freq, search->total_set.freq);
					TAILQ_INSERT_BEFORE(search, level, link);
					break;
				}
		}
	}

	return (0);
}

/*
 * Expand a group of relative settings, creating derived levels from them.
 */
static int
cpufreq_expand_set(struct cpufreq_softc *sc, struct cf_setting_array *set_arr)
{
	struct cf_level *fill, *search;
	struct cf_setting *set;
	int i;

	CF_MTX_ASSERT(&sc->lock);

	/*
	 * Walk the set of all existing levels in reverse.  This is so we
	 * create derived states from the lowest absolute settings first
	 * and discard duplicates created from higher absolute settings.
	 * For instance, a level of 50 MHz derived from 100 MHz + 50% is
	 * preferable to 200 MHz + 25% because absolute settings are more
	 * efficient since they often change the voltage as well.
	 */
	TAILQ_FOREACH_REVERSE(search, &sc->all_levels, cf_level_lst, link) {
		/* Add each setting to the level, duplicating if necessary. */
		for (i = 0; i < set_arr->count; i++) {
			set = &set_arr->sets[i];

			/*
			 * If this setting is less than 100%, split the level
			 * into two and add this setting to the new level.
			 */
			fill = search;
			if (set->freq < 10000) {
				fill = cpufreq_dup_set(sc, search, set);

				/*
				 * The new level was a duplicate of an existing
				 * level or its absolute setting is too high
				 * so we freed it.  For example, we discard a
				 * derived level of 1000 MHz/25% if a level
				 * of 500 MHz/100% already exists.
				 */
				if (fill == NULL)
					break;
			}

			/* Add this setting to the existing or new level. */
			KASSERT(fill->rel_count < MAX_SETTINGS,
			    ("cpufreq: too many relative drivers (%d)",
			    MAX_SETTINGS));
			fill->rel_set[fill->rel_count] = *set;
			fill->rel_count++;
			CF_DEBUG(
			"expand set added rel setting %d%% to %d level\n",
			    set->freq / 100, fill->total_set.freq);
		}
	}

	return (0);
}

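/*
 * Duplicate a level and scale its frequency and power by a relative
 * setting, inserting the copy in sorted order.  Returns NULL (and frees the
 * copy) if the derived level would duplicate or be less efficient than an
 * existing one.
 */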
static struct cf_level *
cpufreq_dup_set(struct cpufreq_softc *sc, struct cf_level *dup,
    struct cf_setting *set)
{
	struct cf_level_lst *list;
	struct cf_level *fill, *itr;
	struct cf_setting *fill_set, *itr_set;
	int i;

	CF_MTX_ASSERT(&sc->lock);

	/*
	 * Create a new level, copy it from the old one, and update the
	 * total frequency and power by the percentage specified in the
	 * relative setting.
	 */
	fill = malloc(sizeof(*fill), M_TEMP, M_NOWAIT);
	if (fill == NULL)
		return (NULL);
	*fill = *dup;
	fill_set = &fill->total_set;
	fill_set->freq =
	    ((uint64_t)fill_set->freq * set->freq) / 10000;
	if (fill_set->power != CPUFREQ_VAL_UNKNOWN) {
		fill_set->power = ((uint64_t)fill_set->power * set->freq)
		    / 10000;
	}
	if (set->lat != CPUFREQ_VAL_UNKNOWN) {
		if (fill_set->lat != CPUFREQ_VAL_UNKNOWN)
			fill_set->lat += set->lat;
		else
			fill_set->lat = set->lat;
	}
	CF_DEBUG("dup set considering derived setting %d\n", fill_set->freq);

	/*
	 * If we copied an old level that we already modified (say, at 100%),
	 * we need to remove that setting before adding this one.  Since we
	 * process each setting array in order, we know any settings for this
	 * driver will be found at the end.
	 */
	for (i = fill->rel_count; i != 0; i--) {
		if (fill->rel_set[i - 1].dev != set->dev)
			break;
		CF_DEBUG("removed last relative driver: %s\n",
		    device_get_nameunit(set->dev));
		fill->rel_count--;
	}

	/*
	 * Insert the new level in sorted order.  If it is a duplicate of an
	 * existing level (1) or has an absolute setting higher than the
	 * existing level (2), do not add it.  We can do this since any such
	 * level is guaranteed to use less power.  For example (1), a level
	 * with one absolute setting of 800 MHz uses less power than one
	 * composed of an absolute setting of 1600 MHz and a relative setting
	 * at 50%.  Also for example (2), a level of 800 MHz/75% is preferable
	 * to 1600 MHz/25% even though the latter has a lower total frequency.
	 */
	list = &sc->all_levels;
	KASSERT(!TAILQ_EMPTY(list), ("all levels list empty in dup set"));
	TAILQ_FOREACH_REVERSE(itr, list, cf_level_lst, link) {
		itr_set = &itr->total_set;
		if (CPUFREQ_CMP(fill_set->freq, itr_set->freq)) {
			CF_DEBUG("dup set rejecting %d (dupe)\n",
			    fill_set->freq);
			itr = NULL;
			break;
		} else if (fill_set->freq < itr_set->freq) {
			if (fill->abs_set.freq <= itr->abs_set.freq) {
				CF_DEBUG(
			"dup done, inserting new level %d after %d\n",
				    fill_set->freq, itr_set->freq);
				TAILQ_INSERT_AFTER(list, itr, fill, link);
				sc->all_count++;
			} else {
				CF_DEBUG("dup set rejecting %d (abs too big)\n",
				    fill_set->freq);
				itr = NULL;
			}
			break;
		}
	}

	/* We didn't find a good place for this new level so free it. */
	if (itr == NULL) {
		CF_DEBUG("dup set freeing new level %d (not optimal)\n",
		    fill_set->freq);
		free(fill, M_TEMP);
		fill = NULL;
	}

	return (fill);
}

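/*
 * Sysctl handler for "freq": report the current level or, on a write, set
 * the closest matching level on every cpufreq device.
 */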
static int
cpufreq_curr_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct cpufreq_softc *sc;
	struct cf_level *levels;
	int best, count, diff, bdiff, devcount, error, freq, i, n;
	device_t *devs;

	devs = NULL;
	sc = oidp->oid_arg1;
	levels = sc->levels_buf;

	error = CPUFREQ_GET(sc->dev, &levels[0]);
	if (error)
		goto out;
	freq = levels[0].total_set.freq;
	error = sysctl_handle_int(oidp, &freq, 0, req);
	if (error != 0 || req->newptr == NULL)
		goto out;

	/*
	 * While we only call cpufreq_get() on one device (assuming all
	 * CPUs have equal levels), we call cpufreq_set() on all CPUs.
	 * This is needed for some MP systems.
	 */
	error = devclass_get_devices(cpufreq_dc, &devs, &devcount);
	if (error)
		goto out;
	for (n = 0; n < devcount; n++) {
		count = CF_MAX_LEVELS;
		error = CPUFREQ_LEVELS(devs[n], levels, &count);
		if (error) {
			if (error == E2BIG)
				printf(
			"cpufreq: need to increase CF_MAX_LEVELS\n");
			break;
		}
		best = 0;
		bdiff = 1 << 30;
		for (i = 0; i < count; i++) {
			diff = abs(levels[i].total_set.freq - freq);
			if (diff < bdiff) {
				bdiff = diff;
				best = i;
			}
		}
		error = CPUFREQ_SET(devs[n], &levels[best], CPUFREQ_PRIO_USER);
	}

out:
	if (devs)
		free(devs, M_TEMP);
	return (error);
}

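/* Sysctl handler for "freq_levels": print each level as "freq/power". */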
static int
cpufreq_levels_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct cpufreq_softc *sc;
	struct cf_level *levels;
	struct cf_setting *set;
	struct sbuf sb;
	int count, error, i;

	sc = oidp->oid_arg1;
	sbuf_new(&sb, NULL, 128, SBUF_AUTOEXTEND);

	/* Get settings from the device and generate the output string. */
	count = CF_MAX_LEVELS;
	levels = sc->levels_buf;
	if (levels == NULL) {
		sbuf_delete(&sb);
		return (ENOMEM);
	}
	error = CPUFREQ_LEVELS(sc->dev, levels, &count);
	if (error) {
		if (error == E2BIG)
			printf("cpufreq: need to increase CF_MAX_LEVELS\n");
		goto out;
	}
	if (count) {
		for (i = 0; i < count; i++) {
			set = &levels[i].total_set;
			sbuf_printf(&sb, "%d/%d ", set->freq, set->power);
		}
	} else
		sbuf_cpy(&sb, "0");
	sbuf_trim(&sb);
	sbuf_finish(&sb);
	error = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);

out:
	sbuf_delete(&sb);
	return (error);
}

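/*
 * Sysctl handler for a driver's per-device "freq_settings": print the raw
 * settings the driver reports.
 */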
static int
cpufreq_settings_sysctl(SYSCTL_HANDLER_ARGS)
{
	device_t dev;
	struct cf_setting *sets;
	struct sbuf sb;
	int error, i, set_count;

	dev = oidp->oid_arg1;
	sbuf_new(&sb, NULL, 128, SBUF_AUTOEXTEND);

	/* Get settings from the device and generate the output string. */
	set_count = MAX_SETTINGS;
	sets = malloc(set_count * sizeof(*sets), M_TEMP, M_NOWAIT);
	if (sets == NULL) {
		sbuf_delete(&sb);
		return (ENOMEM);
	}
	error = CPUFREQ_DRV_SETTINGS(dev, sets, &set_count);
	if (error)
		goto out;
	if (set_count) {
		for (i = 0; i < set_count; i++)
			sbuf_printf(&sb, "%d/%d ", sets[i].freq, sets[i].power);
	} else
		sbuf_cpy(&sb, "0");
	sbuf_trim(&sb);
	sbuf_finish(&sb);
	error = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);

out:
	free(sets, M_TEMP);
	sbuf_delete(&sb);
	return (error);
}

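/*
 * Called by cpufreq drivers when they attach: add a per-driver
 * "freq_settings" sysctl and create the cpufreq child device for this CPU
 * if it does not already exist.
 */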
int
cpufreq_register(device_t dev)
{
	struct cpufreq_softc *sc;
	device_t cf_dev, cpu_dev;

	/* Add a sysctl to get each driver's settings separately. */
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "freq_settings", CTLTYPE_STRING | CTLFLAG_RD, dev, 0,
	    cpufreq_settings_sysctl, "A", "CPU frequency driver settings");

	/*
	 * Add only one cpufreq device to each CPU.  Currently, all CPUs
	 * must offer the same levels and be switched at the same time.
	 */
	cpu_dev = device_get_parent(dev);
	if ((cf_dev = device_find_child(cpu_dev, "cpufreq", -1))) {
		sc = device_get_softc(cf_dev);
		sc->max_mhz = CPUFREQ_VAL_UNKNOWN;
		return (0);
	}

	/* Add the child device and possibly sysctls. */
	cf_dev = BUS_ADD_CHILD(cpu_dev, 0, "cpufreq", -1);
	if (cf_dev == NULL)
		return (ENOMEM);
	device_quiet(cf_dev);

	return (device_probe_and_attach(cf_dev));
}

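/*
 * Called by cpufreq drivers when they detach: if this was the last driver
 * for the CPU, delete the cpufreq child device as well.
 */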
int
cpufreq_unregister(device_t dev)
{
	device_t cf_dev, *devs;
	int cfcount, devcount, error, i, type;

	/*
	 * If this is the last cpufreq child device, remove the control
	 * device as well.  We identify cpufreq children by calling a method
	 * they support.
	 */
	error = device_get_children(device_get_parent(dev), &devs, &devcount);
	if (error)
		return (error);
	cf_dev = device_find_child(device_get_parent(dev), "cpufreq", -1);
	if (cf_dev == NULL) {
		device_printf(dev,
	"warning: cpufreq_unregister called with no cpufreq device active\n");
		free(devs, M_TEMP);
		return (0);
	}
	cfcount = 0;
	for (i = 0; i < devcount; i++) {
		if (!device_is_attached(devs[i]))
			continue;
		if (CPUFREQ_DRV_TYPE(devs[i], &type) == 0)
			cfcount++;
	}
	if (cfcount <= 1)
		device_delete_child(device_get_parent(cf_dev), cf_dev);
	free(devs, M_TEMP);

	return (0);
}

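/*
 * Notify any cpufreq_levels_changed eventhandler consumers that the set of
 * available levels for this CPU may have changed.
 */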
int
cpufreq_settings_changed(device_t dev)
{

	EVENTHANDLER_INVOKE(cpufreq_levels_changed,
	    device_get_unit(device_get_parent(dev)));
	return (0);
}