/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2004-2007 Nate Lawson (SDG)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/sx.h>
#include <sys/timetc.h>
#include <sys/taskqueue.h>

#include "cpufreq_if.h"

/*
 * Common CPU frequency glue code.  Drivers for specific hardware can
 * attach this interface to allow users to get/set the CPU frequency.
 */
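/*
 * The glue is driven through sysctl.  For example, on a typical system the
 * nodes added below appear as:
 *
 *	dev.cpu.0.freq		- get/set the current frequency (MHz)
 *	dev.cpu.0.freq_levels	- list of available "freq/power" levels
 *	dev.cpufreq.0.freq_driver - name of the backing hardware driver
 *
 * along with a per-driver "freq_settings" node and the debug.cpufreq.lowest
 * and debug.cpufreq.verbose tunables defined below.  Exact device names
 * depend on which hardware driver attaches.
 */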

/*
 * Number of levels we can handle.  Levels are synthesized from settings
 * so for M settings and N drivers, there may be M*N levels.
 */
#define CF_MAX_LEVELS	256

struct cf_saved_freq {
	struct cf_level			level;
	int				priority;
	SLIST_ENTRY(cf_saved_freq)	link;
};

struct cpufreq_softc {
	struct sx			lock;
	struct cf_level			curr_level;
	int				curr_priority;
	SLIST_HEAD(, cf_saved_freq)	saved_freq;
	struct cf_level_lst		all_levels;
	int				all_count;
	int				max_mhz;
	device_t			dev;
	device_t			cf_drv_dev;
	struct sysctl_ctx_list		sysctl_ctx;
	struct task			startup_task;
	struct cf_level			*levels_buf;
};

struct cf_setting_array {
	struct cf_setting		sets[MAX_SETTINGS];
	int				count;
	TAILQ_ENTRY(cf_setting_array)	link;
};

TAILQ_HEAD(cf_setting_lst, cf_setting_array);

#define CF_MTX_INIT(x)		sx_init((x), "cpufreq lock")
#define CF_MTX_LOCK(x)		sx_xlock((x))
#define CF_MTX_UNLOCK(x)	sx_xunlock((x))
#define CF_MTX_ASSERT(x)	sx_assert((x), SX_XLOCKED)

#define CF_DEBUG(msg...)	do {		\
	if (cf_verbose)				\
		printf("cpufreq: " msg);	\
	} while (0)

static int	cpufreq_attach(device_t dev);
static void	cpufreq_startup_task(void *ctx, int pending);
static int	cpufreq_detach(device_t dev);
static int	cf_set_method(device_t dev, const struct cf_level *level,
		    int priority);
static int	cf_get_method(device_t dev, struct cf_level *level);
static int	cf_levels_method(device_t dev, struct cf_level *levels,
		    int *count);
static int	cpufreq_insert_abs(struct cpufreq_softc *sc,
		    struct cf_setting *sets, int count);
static int	cpufreq_expand_set(struct cpufreq_softc *sc,
		    struct cf_setting_array *set_arr);
static struct cf_level *cpufreq_dup_set(struct cpufreq_softc *sc,
		    struct cf_level *dup, struct cf_setting *set);
static int	cpufreq_curr_sysctl(SYSCTL_HANDLER_ARGS);
static int	cpufreq_levels_sysctl(SYSCTL_HANDLER_ARGS);
static int	cpufreq_settings_sysctl(SYSCTL_HANDLER_ARGS);

static device_method_t cpufreq_methods[] = {
	DEVMETHOD(device_probe,		bus_generic_probe),
	DEVMETHOD(device_attach,	cpufreq_attach),
	DEVMETHOD(device_detach,	cpufreq_detach),

	DEVMETHOD(cpufreq_set,		cf_set_method),
	DEVMETHOD(cpufreq_get,		cf_get_method),
	DEVMETHOD(cpufreq_levels,	cf_levels_method),
	{0, 0}
};

static driver_t cpufreq_driver = {
	"cpufreq", cpufreq_methods, sizeof(struct cpufreq_softc)
};

DRIVER_MODULE(cpufreq, cpu, cpufreq_driver, 0, 0);

static int		cf_lowest_freq;
static int		cf_verbose;
static SYSCTL_NODE(_debug, OID_AUTO, cpufreq, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
    "cpufreq debugging");
SYSCTL_INT(_debug_cpufreq, OID_AUTO, lowest, CTLFLAG_RWTUN, &cf_lowest_freq, 1,
    "Don't provide levels below this frequency.");
SYSCTL_INT(_debug_cpufreq, OID_AUTO, verbose, CTLFLAG_RWTUN, &cf_verbose, 1,
    "Print verbose debugging messages");

/*
 * This is called as the result of a hardware-specific frequency control driver
 * calling cpufreq_register().  It provides a general interface for system-wide
 * frequency control and operates on a per-CPU basis.
 */
static int
cpufreq_attach(device_t dev)
{
	struct cpufreq_softc *sc;
	struct pcpu *pc;
	device_t parent;
	uint64_t rate;

	CF_DEBUG("initializing %s\n", device_get_nameunit(dev));
	sc = device_get_softc(dev);
	parent = device_get_parent(dev);
	sc->dev = dev;
	sysctl_ctx_init(&sc->sysctl_ctx);
	TAILQ_INIT(&sc->all_levels);
	CF_MTX_INIT(&sc->lock);
	sc->curr_level.total_set.freq = CPUFREQ_VAL_UNKNOWN;
	SLIST_INIT(&sc->saved_freq);
	/* Try to get nominal CPU freq to use it as maximum later if needed */
	sc->max_mhz = cpu_get_nominal_mhz(dev);
	/* If that fails, try to measure the current rate */
	if (sc->max_mhz <= 0) {
		CF_DEBUG("Unable to obtain nominal frequency.\n");
		pc = cpu_get_pcpu(dev);
		if (cpu_est_clockrate(pc->pc_cpuid, &rate) == 0)
			sc->max_mhz = rate / 1000000;
		else
			sc->max_mhz = CPUFREQ_VAL_UNKNOWN;
	}

	CF_DEBUG("initializing one-time data for %s\n",
	    device_get_nameunit(dev));
	sc->levels_buf = malloc(CF_MAX_LEVELS * sizeof(*sc->levels_buf),
	    M_DEVBUF, M_WAITOK);
	SYSCTL_ADD_PROC(&sc->sysctl_ctx,
	    SYSCTL_CHILDREN(device_get_sysctl_tree(parent)),
	    OID_AUTO, "freq", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
	    sc, 0, cpufreq_curr_sysctl, "I", "Current CPU frequency");
	SYSCTL_ADD_PROC(&sc->sysctl_ctx,
	    SYSCTL_CHILDREN(device_get_sysctl_tree(parent)),
	    OID_AUTO, "freq_levels",
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, 0,
	    cpufreq_levels_sysctl, "A", "CPU frequency levels");

	/*
	 * Queue a one-shot broadcast that levels have changed.
	 * It will run once the system has completed booting.
	 */
	TASK_INIT(&sc->startup_task, 0, cpufreq_startup_task, dev);
	taskqueue_enqueue(taskqueue_thread, &sc->startup_task);

	return (0);
}

/* Handle any work to be done for all drivers that attached during boot. */
static void
cpufreq_startup_task(void *ctx, int pending)
{

	cpufreq_settings_changed((device_t)ctx);
}

static int
cpufreq_detach(device_t dev)
{
	struct cpufreq_softc *sc;
	struct cf_saved_freq *saved_freq;

	CF_DEBUG("shutdown %s\n", device_get_nameunit(dev));
	sc = device_get_softc(dev);
	sysctl_ctx_free(&sc->sysctl_ctx);

	while ((saved_freq = SLIST_FIRST(&sc->saved_freq)) != NULL) {
		SLIST_REMOVE_HEAD(&sc->saved_freq, link);
		free(saved_freq, M_TEMP);
	}

	free(sc->levels_buf, M_DEVBUF);

	return (0);
}

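/*
 * Switch to the given level on behalf of a caller with the given priority.
 * The absolute setting is programmed first, followed by any relative
 * settings, with the calling thread temporarily bound to the target CPU for
 * each driver call.  If a higher-priority caller preempts the current level,
 * the old level is saved so it can be restored later by passing a NULL
 * level.  Pre- and post-change eventhandlers are invoked around the switch.
 */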
static int
cf_set_method(device_t dev, const struct cf_level *level, int priority)
{
	struct cpufreq_softc *sc;
	const struct cf_setting *set;
	struct cf_saved_freq *saved_freq, *curr_freq;
	struct pcpu *pc;
	int error, i;
	u_char pri;

	sc = device_get_softc(dev);
	error = 0;
	set = NULL;
	saved_freq = NULL;

	/* We are going to change levels so notify the pre-change handler. */
	EVENTHANDLER_INVOKE(cpufreq_pre_change, level, &error);
	if (error != 0) {
		EVENTHANDLER_INVOKE(cpufreq_post_change, level, error);
		return (error);
	}

	CF_MTX_LOCK(&sc->lock);

#ifdef SMP
#ifdef EARLY_AP_STARTUP
	MPASS(mp_ncpus == 1 || smp_started);
#else
	/*
	 * If still booting and secondary CPUs not started yet, don't allow
	 * changing the frequency until they're online.  This is because we
	 * can't switch to them using sched_bind() and thus we'd only be
	 * switching the main CPU.  XXXTODO: Need to think more about how to
	 * handle having different CPUs at different frequencies.
	 */
	if (mp_ncpus > 1 && !smp_started) {
		device_printf(dev, "rejecting change, SMP not started yet\n");
		error = ENXIO;
		goto out;
	}
#endif
#endif /* SMP */

	/*
	 * If the requested level has a lower priority, don't allow
	 * the new level right now.
	 */
	if (priority < sc->curr_priority) {
		CF_DEBUG("ignoring, requested prio %d less than curr prio %d\n",
		    priority, sc->curr_priority);
		error = EPERM;
		goto out;
	}

	/*
	 * If the caller didn't specify a level and one is saved, prepare to
	 * restore the saved level.  If none has been saved, return an error.
	 */
	if (level == NULL) {
		saved_freq = SLIST_FIRST(&sc->saved_freq);
		if (saved_freq == NULL) {
			CF_DEBUG("NULL level, no saved level\n");
			error = ENXIO;
			goto out;
		}
		level = &saved_freq->level;
		priority = saved_freq->priority;
		CF_DEBUG("restoring saved level, freq %d prio %d\n",
		    level->total_set.freq, priority);
	}

	/* Reject levels that are below our specified threshold. */
	if (level->total_set.freq < cf_lowest_freq) {
		CF_DEBUG("rejecting freq %d, less than %d limit\n",
		    level->total_set.freq, cf_lowest_freq);
		error = EINVAL;
		goto out;
	}

	/* If already at this level, just return. */
	if (sc->curr_level.total_set.freq == level->total_set.freq) {
		CF_DEBUG("skipping freq %d, same as current level %d\n",
		    level->total_set.freq, sc->curr_level.total_set.freq);
		goto skip;
	}

	/* First, set the absolute frequency via its driver. */
	set = &level->abs_set;
	if (set->dev) {
		if (!device_is_attached(set->dev)) {
			error = ENXIO;
			goto out;
		}

		/* Bind to the target CPU before switching. */
		pc = cpu_get_pcpu(set->dev);

		/* Skip settings if CPU is not started. */
		if (pc == NULL) {
			error = 0;
			goto out;
		}
		thread_lock(curthread);
		pri = curthread->td_priority;
		sched_prio(curthread, PRI_MIN);
		sched_bind(curthread, pc->pc_cpuid);
		thread_unlock(curthread);
		CF_DEBUG("setting abs freq %d on %s (cpu %d)\n", set->freq,
		    device_get_nameunit(set->dev), PCPU_GET(cpuid));
		error = CPUFREQ_DRV_SET(set->dev, set);
		thread_lock(curthread);
		sched_unbind(curthread);
		sched_prio(curthread, pri);
		thread_unlock(curthread);
		if (error) {
			goto out;
		}
	}

	/* Next, set any/all relative frequencies via their drivers. */
	for (i = 0; i < level->rel_count; i++) {
		set = &level->rel_set[i];
		if (!device_is_attached(set->dev)) {
			error = ENXIO;
			goto out;
		}

		/* Bind to the target CPU before switching. */
		pc = cpu_get_pcpu(set->dev);
		thread_lock(curthread);
		pri = curthread->td_priority;
		sched_prio(curthread, PRI_MIN);
		sched_bind(curthread, pc->pc_cpuid);
		thread_unlock(curthread);
		CF_DEBUG("setting rel freq %d on %s (cpu %d)\n", set->freq,
		    device_get_nameunit(set->dev), PCPU_GET(cpuid));
		error = CPUFREQ_DRV_SET(set->dev, set);
		thread_lock(curthread);
		sched_unbind(curthread);
		sched_prio(curthread, pri);
		thread_unlock(curthread);
		if (error) {
			/* XXX Back out any successful setting? */
			goto out;
		}
	}

skip:
	/*
	 * Before recording the current level, check if we're going to a
	 * higher priority.  If so, save the previous level and priority.
	 */
	if (sc->curr_level.total_set.freq != CPUFREQ_VAL_UNKNOWN &&
	    priority > sc->curr_priority) {
		CF_DEBUG("saving level, freq %d prio %d\n",
		    sc->curr_level.total_set.freq, sc->curr_priority);
		curr_freq = malloc(sizeof(*curr_freq), M_TEMP, M_NOWAIT);
		if (curr_freq == NULL) {
			error = ENOMEM;
			goto out;
		}
		curr_freq->level = sc->curr_level;
		curr_freq->priority = sc->curr_priority;
		SLIST_INSERT_HEAD(&sc->saved_freq, curr_freq, link);
	}
	sc->curr_level = *level;
	sc->curr_priority = priority;

	/* If we were restoring a saved state, reset it to "unused". */
	if (saved_freq != NULL) {
		CF_DEBUG("resetting saved level\n");
		sc->curr_level.total_set.freq = CPUFREQ_VAL_UNKNOWN;
		SLIST_REMOVE_HEAD(&sc->saved_freq, link);
		free(saved_freq, M_TEMP);
	}

out:
	CF_MTX_UNLOCK(&sc->lock);

	/*
	 * We changed levels (or attempted to) so notify the post-change
	 * handler of new frequency or error.
	 */
	EVENTHANDLER_INVOKE(cpufreq_post_change, level, error);
	if (error && set)
		device_printf(set->dev, "set freq failed, err %d\n", error);

	return (error);
}

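/*
 * Ask a cpufreq hardware driver for its current setting and return the
 * frequency in MHz, or -1 if the driver could not report it.
 */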
static int
cpufreq_get_frequency(device_t dev)
{
	struct cf_setting set;

	if (CPUFREQ_DRV_GET(dev, &set) != 0)
		return (-1);

	return (set.freq);
}

/* Returns the index into *levels matching the current frequency, or -1. */
static int
cpufreq_get_level(device_t dev, struct cf_level *levels, int count)
{
	int i, freq;

	if ((freq = cpufreq_get_frequency(dev)) < 0)
		return (-1);
	for (i = 0; i < count; i++)
		if (freq == levels[i].total_set.freq)
			return (i);

	return (-1);
}

/*
 * Populate *level with the current frequency.  Normally this is the value
 * cached in sc->curr_level; if the lower-level driver has set the
 * CPUFREQ_FLAG_UNCACHED flag, the frequency is obtained from the driver
 * itself instead.
 */
static int
cf_get_method(device_t dev, struct cf_level *level)
{
	struct cpufreq_softc *sc;
	struct cf_level *levels;
	struct cf_setting *curr_set;
	struct pcpu *pc;
	int bdiff, count, diff, error, i, type;
	uint64_t rate;

	sc = device_get_softc(dev);
	error = 0;
	levels = NULL;

	/*
	 * If we already know the current frequency, and the driver didn't ask
	 * for uncached usage, we're done.
	 */
	CF_MTX_LOCK(&sc->lock);
	curr_set = &sc->curr_level.total_set;
	error = CPUFREQ_DRV_TYPE(sc->cf_drv_dev, &type);
	if (error == 0 && (type & CPUFREQ_FLAG_UNCACHED)) {
		struct cf_setting set;

		/*
		 * If the driver wants to always report back the real frequency,
		 * first try the driver and if that fails, fall back to
		 * estimating.
		 */
		if (CPUFREQ_DRV_GET(sc->cf_drv_dev, &set) == 0) {
			sc->curr_level.total_set = set;
			CF_DEBUG("get returning immediate freq %d\n",
			    curr_set->freq);
			goto out;
		}
	} else if (curr_set->freq != CPUFREQ_VAL_UNKNOWN) {
		CF_DEBUG("get returning known freq %d\n", curr_set->freq);
		error = 0;
		goto out;
	}
	CF_MTX_UNLOCK(&sc->lock);

	/*
	 * We need to figure out the current level.  Loop through every
	 * driver, getting the current setting.  Then, attempt to get a best
	 * match of settings against each level.
	 */
	count = CF_MAX_LEVELS;
	levels = malloc(count * sizeof(*levels), M_TEMP, M_NOWAIT);
	if (levels == NULL)
		return (ENOMEM);
	error = CPUFREQ_LEVELS(sc->dev, levels, &count);
	if (error) {
		if (error == E2BIG)
			printf("cpufreq: need to increase CF_MAX_LEVELS\n");
		free(levels, M_TEMP);
		return (error);
	}

	/*
	 * Reacquire the lock and search for the given level.
	 *
	 * XXX Note: this is not quite right since we really need to go
	 * through each level and compare both absolute and relative
	 * settings for each driver in the system before making a match.
	 * The estimation code below catches this case though.
	 */
	CF_MTX_LOCK(&sc->lock);
	i = cpufreq_get_level(sc->cf_drv_dev, levels, count);
	if (i >= 0)
		sc->curr_level = levels[i];
	else
		CF_DEBUG("Couldn't find supported level for %s\n",
		    device_get_nameunit(sc->cf_drv_dev));

	if (curr_set->freq != CPUFREQ_VAL_UNKNOWN) {
		CF_DEBUG("get matched freq %d from drivers\n", curr_set->freq);
		goto out;
	}

	/*
	 * We couldn't find an exact match, so attempt to estimate and then
	 * match against a level.
	 */
	pc = cpu_get_pcpu(dev);
	if (pc == NULL) {
		error = ENXIO;
		goto out;
	}
	cpu_est_clockrate(pc->pc_cpuid, &rate);
	rate /= 1000000;
	bdiff = 1 << 30;
	for (i = 0; i < count; i++) {
		diff = abs(levels[i].total_set.freq - rate);
		if (diff < bdiff) {
			bdiff = diff;
			sc->curr_level = levels[i];
		}
	}
	CF_DEBUG("get estimated freq %d\n", curr_set->freq);

out:
	if (error == 0)
		*level = sc->curr_level;

	CF_MTX_UNLOCK(&sc->lock);
	if (levels)
		free(levels, M_TEMP);
	return (error);
}

/*
 * Either directly obtain settings from the cpufreq driver, or build a list of
 * relative settings to be integrated later against an absolute max.
 */
static int
cpufreq_add_levels(device_t cf_dev, struct cf_setting_lst *rel_sets)
{
	struct cf_setting_array *set_arr;
	struct cf_setting *sets;
	device_t dev;
	struct cpufreq_softc *sc;
	int type, set_count, error;

	sc = device_get_softc(cf_dev);
	dev = sc->cf_drv_dev;

	/* Skip devices that aren't ready. */
	if (!device_is_attached(cf_dev))
		return (0);

	/*
	 * Get settings, skipping drivers that offer no settings or
	 * provide settings for informational purposes only.
	 */
	error = CPUFREQ_DRV_TYPE(dev, &type);
	if (error != 0 || (type & CPUFREQ_FLAG_INFO_ONLY)) {
		if (error == 0) {
			CF_DEBUG("skipping info-only driver %s\n",
			    device_get_nameunit(cf_dev));
		}
		return (error);
	}

	sets = malloc(MAX_SETTINGS * sizeof(*sets), M_TEMP, M_NOWAIT);
	if (sets == NULL)
		return (ENOMEM);

	set_count = MAX_SETTINGS;
	error = CPUFREQ_DRV_SETTINGS(dev, sets, &set_count);
	if (error != 0 || set_count == 0)
		goto out;

	/* Add the settings to our absolute/relative lists. */
	switch (type & CPUFREQ_TYPE_MASK) {
	case CPUFREQ_TYPE_ABSOLUTE:
		error = cpufreq_insert_abs(sc, sets, set_count);
		break;
	case CPUFREQ_TYPE_RELATIVE:
		CF_DEBUG("adding %d relative settings\n", set_count);
		set_arr = malloc(sizeof(*set_arr), M_TEMP, M_NOWAIT);
		if (set_arr == NULL) {
			error = ENOMEM;
			goto out;
		}
		bcopy(sets, set_arr->sets, set_count * sizeof(*sets));
		set_arr->count = set_count;
		TAILQ_INSERT_TAIL(rel_sets, set_arr, link);
		break;
	default:
		error = EINVAL;
	}

out:
	free(sets, M_TEMP);
	return (error);
}

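/*
 * Build the list of available levels for this CPU.  The list is regenerated
 * on every call: absolute settings are collected from the hardware driver
 * (or a single 100% level is synthesized if it provides none), relative
 * settings are then expanded against them, and the result is copied into
 * the caller's array.  Returns E2BIG with the required count if the array
 * is too small.
 */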
static int
cf_levels_method(device_t dev, struct cf_level *levels, int *count)
{
	struct cf_setting_array *set_arr;
	struct cf_setting_lst rel_sets;
	struct cpufreq_softc *sc;
	struct cf_level *lev;
	struct pcpu *pc;
	int error, i;
	uint64_t rate;

	if (levels == NULL || count == NULL)
		return (EINVAL);

	TAILQ_INIT(&rel_sets);
	sc = device_get_softc(dev);

	CF_MTX_LOCK(&sc->lock);
	error = cpufreq_add_levels(sc->dev, &rel_sets);
	if (error)
		goto out;

	/*
	 * If there are no absolute levels, create a fake one at 100%.  We
	 * then cache the clockrate for later use as our base frequency.
	 */
	if (TAILQ_EMPTY(&sc->all_levels)) {
		struct cf_setting set;

		CF_DEBUG("No absolute levels returned by driver\n");

		if (sc->max_mhz == CPUFREQ_VAL_UNKNOWN) {
			sc->max_mhz = cpu_get_nominal_mhz(dev);
			/*
			 * If the CPU can't report a rate for 100%, hope
			 * the CPU is running at its nominal rate right now,
			 * and use that instead.
			 */
			if (sc->max_mhz <= 0) {
				pc = cpu_get_pcpu(dev);
				cpu_est_clockrate(pc->pc_cpuid, &rate);
				sc->max_mhz = rate / 1000000;
			}
		}
		memset(&set, CPUFREQ_VAL_UNKNOWN, sizeof(set));
		set.freq = sc->max_mhz;
		set.dev = NULL;
		error = cpufreq_insert_abs(sc, &set, 1);
		if (error)
			goto out;
	}

	/* Create a combined list of absolute + relative levels. */
	TAILQ_FOREACH(set_arr, &rel_sets, link)
		cpufreq_expand_set(sc, set_arr);

	/* If the caller doesn't have enough space, return the actual count. */
	if (sc->all_count > *count) {
		*count = sc->all_count;
		error = E2BIG;
		goto out;
	}

	/* Finally, output the list of levels. */
	i = 0;
	TAILQ_FOREACH(lev, &sc->all_levels, link) {
		/* Skip levels that have a frequency that is too low. */
		if (lev->total_set.freq < cf_lowest_freq) {
			sc->all_count--;
			continue;
		}

		levels[i] = *lev;
		i++;
	}
	*count = sc->all_count;
	error = 0;

out:
	/* Clear all levels since we regenerate them each time. */
	while ((lev = TAILQ_FIRST(&sc->all_levels)) != NULL) {
		TAILQ_REMOVE(&sc->all_levels, lev, link);
		free(lev, M_TEMP);
	}
	sc->all_count = 0;

	CF_MTX_UNLOCK(&sc->lock);
	while ((set_arr = TAILQ_FIRST(&rel_sets)) != NULL) {
		TAILQ_REMOVE(&rel_sets, set_arr, link);
		free(set_arr, M_TEMP);
	}
	return (error);
}

/*
 * Create levels for an array of absolute settings and insert them in
 * sorted order in the specified list.
 */
static int
cpufreq_insert_abs(struct cpufreq_softc *sc, struct cf_setting *sets,
    int count)
{
	struct cf_level_lst *list;
	struct cf_level *level, *search;
	int i, inserted;

	CF_MTX_ASSERT(&sc->lock);

	list = &sc->all_levels;
	for (i = 0; i < count; i++) {
		level = malloc(sizeof(*level), M_TEMP, M_NOWAIT | M_ZERO);
		if (level == NULL)
			return (ENOMEM);
		level->abs_set = sets[i];
		level->total_set = sets[i];
		level->total_set.dev = NULL;
		sc->all_count++;
		inserted = 0;

		if (TAILQ_EMPTY(list)) {
			CF_DEBUG("adding abs setting %d at head\n",
			    sets[i].freq);
			TAILQ_INSERT_HEAD(list, level, link);
			continue;
		}

		TAILQ_FOREACH_REVERSE(search, list, cf_level_lst, link)
			if (sets[i].freq <= search->total_set.freq) {
				CF_DEBUG("adding abs setting %d after %d\n",
				    sets[i].freq, search->total_set.freq);
				TAILQ_INSERT_AFTER(list, search, level, link);
				inserted = 1;
				break;
			}

		if (inserted == 0) {
			TAILQ_FOREACH(search, list, link)
				if (sets[i].freq >= search->total_set.freq) {
					CF_DEBUG("adding abs setting %d before %d\n",
					    sets[i].freq, search->total_set.freq);
					TAILQ_INSERT_BEFORE(search, level, link);
					break;
				}
		}
	}

	return (0);
}

/*
 * Expand a group of relative settings, creating derived levels from them.
 */
static int
cpufreq_expand_set(struct cpufreq_softc *sc, struct cf_setting_array *set_arr)
{
	struct cf_level *fill, *search;
	struct cf_setting *set;
	int i;

	CF_MTX_ASSERT(&sc->lock);

	/*
	 * Walk the set of all existing levels in reverse.  This is so we
	 * create derived states from the lowest absolute settings first
	 * and discard duplicates created from higher absolute settings.
	 * For instance, a level of 50 MHz derived from 100 MHz + 50% is
	 * preferable to 200 MHz + 25% because absolute settings are more
	 * efficient since they often change the voltage as well.
	 */
	TAILQ_FOREACH_REVERSE(search, &sc->all_levels, cf_level_lst, link) {
		/* Add each setting to the level, duplicating if necessary. */
		for (i = 0; i < set_arr->count; i++) {
			set = &set_arr->sets[i];

			/*
			 * If this setting is less than 100%, split the level
			 * into two and add this setting to the new level.
			 */
			fill = search;
			if (set->freq < 10000) {
				fill = cpufreq_dup_set(sc, search, set);

				/*
				 * The new level was a duplicate of an existing
				 * level or its absolute setting is too high
				 * so we freed it.  For example, we discard a
				 * derived level of 1000 MHz/25% if a level
				 * of 500 MHz/100% already exists.
				 */
				if (fill == NULL)
					break;
			}

			/* Add this setting to the existing or new level. */
			KASSERT(fill->rel_count < MAX_SETTINGS,
			    ("cpufreq: too many relative drivers (%d)",
			    MAX_SETTINGS));
			fill->rel_set[fill->rel_count] = *set;
			fill->rel_count++;
			CF_DEBUG(
			"expand set added rel setting %d%% to %d level\n",
			    set->freq / 100, fill->total_set.freq);
		}
	}

	return (0);
}

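/*
 * Derive a new level from an existing one by applying a relative setting:
 * copy the level, scale its total frequency and power by the setting's
 * percentage, and insert it into the sorted level list.  Returns the new
 * level, or NULL if it was discarded as a duplicate or as less efficient
 * than an existing level.
 */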
static struct cf_level *
cpufreq_dup_set(struct cpufreq_softc *sc, struct cf_level *dup,
    struct cf_setting *set)
{
	struct cf_level_lst *list;
	struct cf_level *fill, *itr;
	struct cf_setting *fill_set, *itr_set;
	int i;

	CF_MTX_ASSERT(&sc->lock);

	/*
	 * Create a new level, copy it from the old one, and update the
	 * total frequency and power by the percentage specified in the
	 * relative setting.
	 */
	fill = malloc(sizeof(*fill), M_TEMP, M_NOWAIT);
	if (fill == NULL)
		return (NULL);
	*fill = *dup;
	fill_set = &fill->total_set;
	fill_set->freq =
	    ((uint64_t)fill_set->freq * set->freq) / 10000;
	if (fill_set->power != CPUFREQ_VAL_UNKNOWN) {
		fill_set->power = ((uint64_t)fill_set->power * set->freq)
		    / 10000;
	}
	if (set->lat != CPUFREQ_VAL_UNKNOWN) {
		if (fill_set->lat != CPUFREQ_VAL_UNKNOWN)
			fill_set->lat += set->lat;
		else
			fill_set->lat = set->lat;
	}
	CF_DEBUG("dup set considering derived setting %d\n", fill_set->freq);

	/*
	 * If we copied an old level that we already modified (say, at 100%),
	 * we need to remove that setting before adding this one.  Since we
	 * process each setting array in order, we know any settings for this
	 * driver will be found at the end.
	 */
	for (i = fill->rel_count; i != 0; i--) {
		if (fill->rel_set[i - 1].dev != set->dev)
			break;
		CF_DEBUG("removed last relative driver: %s\n",
		    device_get_nameunit(set->dev));
		fill->rel_count--;
	}

	/*
	 * Insert the new level in sorted order.  If it is a duplicate of an
	 * existing level (1) or has an absolute setting higher than the
	 * existing level (2), do not add it.  We can do this since the
	 * existing level is guaranteed to use less power.  For example (1),
	 * a level with one absolute setting of 800 MHz uses less power than
	 * one composed of an absolute setting of 1600 MHz and a relative
	 * setting at 50%.  Also for example (2), a level of 800 MHz/75% is
	 * preferable to 1600 MHz/25% even though the latter has a lower
	 * total frequency.
	 */
	list = &sc->all_levels;
	KASSERT(!TAILQ_EMPTY(list), ("all levels list empty in dup set"));
	TAILQ_FOREACH_REVERSE(itr, list, cf_level_lst, link) {
		itr_set = &itr->total_set;
		if (CPUFREQ_CMP(fill_set->freq, itr_set->freq)) {
			CF_DEBUG("dup set rejecting %d (dupe)\n",
			    fill_set->freq);
			itr = NULL;
			break;
		} else if (fill_set->freq < itr_set->freq) {
			if (fill->abs_set.freq <= itr->abs_set.freq) {
				CF_DEBUG(
			"dup done, inserting new level %d after %d\n",
				    fill_set->freq, itr_set->freq);
				TAILQ_INSERT_AFTER(list, itr, fill, link);
				sc->all_count++;
			} else {
				CF_DEBUG("dup set rejecting %d (abs too big)\n",
				    fill_set->freq);
				itr = NULL;
			}
			break;
		}
	}

	/* We didn't find a good place for this new level so free it. */
	if (itr == NULL) {
		CF_DEBUG("dup set freeing new level %d (not optimal)\n",
		    fill_set->freq);
		free(fill, M_TEMP);
		fill = NULL;
	}

	return (fill);
}

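/*
 * Sysctl handler for the per-CPU "freq" node.  Reads report the current
 * total frequency; writes find the closest matching level and apply it to
 * every cpufreq device in the system at user priority.
 */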
static int
cpufreq_curr_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct cpufreq_softc *sc;
	struct cf_level *levels;
	int best, count, diff, bdiff, devcount, error, freq, i, n;
	device_t *devs;

	devs = NULL;
	sc = oidp->oid_arg1;
	levels = sc->levels_buf;

	error = CPUFREQ_GET(sc->dev, &levels[0]);
	if (error)
		goto out;
	freq = levels[0].total_set.freq;
	error = sysctl_handle_int(oidp, &freq, 0, req);
	if (error != 0 || req->newptr == NULL)
		goto out;

	/*
	 * While we only call cpufreq_get() on one device (assuming all
	 * CPUs have equal levels), we call cpufreq_set() on all CPUs.
	 * This is needed for some MP systems.
	 */
	error = devclass_get_devices(devclass_find("cpufreq"), &devs, &devcount);
	if (error)
		goto out;
	for (n = 0; n < devcount; n++) {
		count = CF_MAX_LEVELS;
		error = CPUFREQ_LEVELS(devs[n], levels, &count);
		if (error) {
			if (error == E2BIG)
				printf(
			"cpufreq: need to increase CF_MAX_LEVELS\n");
			break;
		}
		best = 0;
		bdiff = 1 << 30;
		for (i = 0; i < count; i++) {
			diff = abs(levels[i].total_set.freq - freq);
			if (diff < bdiff) {
				bdiff = diff;
				best = i;
			}
		}
		error = CPUFREQ_SET(devs[n], &levels[best], CPUFREQ_PRIO_USER);
	}

out:
	if (devs)
		free(devs, M_TEMP);
	return (error);
}

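/*
 * Sysctl handler for the per-CPU "freq_levels" node.  Prints one
 * "frequency/power" pair per available level, or "0" if there are none.
 */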
static int
cpufreq_levels_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct cpufreq_softc *sc;
	struct cf_level *levels;
	struct cf_setting *set;
	struct sbuf sb;
	int count, error, i;

	sc = oidp->oid_arg1;
	sbuf_new(&sb, NULL, 128, SBUF_AUTOEXTEND);

	/* Get settings from the device and generate the output string. */
	count = CF_MAX_LEVELS;
	levels = sc->levels_buf;
	if (levels == NULL) {
		sbuf_delete(&sb);
		return (ENOMEM);
	}
	error = CPUFREQ_LEVELS(sc->dev, levels, &count);
	if (error) {
		if (error == E2BIG)
			printf("cpufreq: need to increase CF_MAX_LEVELS\n");
		goto out;
	}
	if (count) {
		for (i = 0; i < count; i++) {
			set = &levels[i].total_set;
			sbuf_printf(&sb, "%d/%d ", set->freq, set->power);
		}
	} else
		sbuf_cpy(&sb, "0");
	sbuf_trim(&sb);
	sbuf_finish(&sb);
	error = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);

out:
	sbuf_delete(&sb);
	return (error);
}

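/*
 * Sysctl handler for a hardware driver's "freq_settings" node.  Prints the
 * driver's raw settings as "frequency/power" pairs, or "0" if it has none.
 */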
static int
cpufreq_settings_sysctl(SYSCTL_HANDLER_ARGS)
{
	device_t dev;
	struct cf_setting *sets;
	struct sbuf sb;
	int error, i, set_count;

	dev = oidp->oid_arg1;
	sbuf_new(&sb, NULL, 128, SBUF_AUTOEXTEND);

	/* Get settings from the device and generate the output string. */
	set_count = MAX_SETTINGS;
	sets = malloc(set_count * sizeof(*sets), M_TEMP, M_NOWAIT);
	if (sets == NULL) {
		sbuf_delete(&sb);
		return (ENOMEM);
	}
	error = CPUFREQ_DRV_SETTINGS(dev, sets, &set_count);
	if (error)
		goto out;
	if (set_count) {
		for (i = 0; i < set_count; i++)
			sbuf_printf(&sb, "%d/%d ", sets[i].freq, sets[i].power);
	} else
		sbuf_cpy(&sb, "0");
	sbuf_trim(&sb);
	sbuf_finish(&sb);
	error = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);

out:
	free(sets, M_TEMP);
	sbuf_delete(&sb);
	return (error);
}

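/*
 * Export the name of the hardware driver backing this cpufreq device as a
 * read-only "freq_driver" sysctl.
 */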
static void
cpufreq_add_freq_driver_sysctl(device_t cf_dev)
{
	struct cpufreq_softc *sc;

	sc = device_get_softc(cf_dev);
	SYSCTL_ADD_CONST_STRING(&sc->sysctl_ctx,
	    SYSCTL_CHILDREN(device_get_sysctl_tree(cf_dev)), OID_AUTO,
	    "freq_driver", CTLFLAG_RD, device_get_nameunit(sc->cf_drv_dev),
	    "cpufreq driver used by this cpu");
}

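/*
 * Called by hardware-specific drivers to attach the cpufreq glue to their
 * CPU.  Adds a per-driver "freq_settings" sysctl and creates at most one
 * cpufreq child device under each CPU device.
 */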
int
cpufreq_register(device_t dev)
{
	struct cpufreq_softc *sc;
	device_t cf_dev, cpu_dev;
	int error;

	/* Add a sysctl to get each driver's settings separately. */
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "freq_settings",
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, dev, 0,
	    cpufreq_settings_sysctl, "A", "CPU frequency driver settings");

	/*
	 * Add only one cpufreq device to each CPU.  Currently, all CPUs
	 * must offer the same levels and be switched at the same time.
	 */
	cpu_dev = device_get_parent(dev);
	if ((cf_dev = device_find_child(cpu_dev, "cpufreq", -1))) {
		sc = device_get_softc(cf_dev);
		sc->max_mhz = CPUFREQ_VAL_UNKNOWN;
		MPASS(sc->cf_drv_dev != NULL);
		return (0);
	}

	/* Add the child device and possibly sysctls. */
	cf_dev = BUS_ADD_CHILD(cpu_dev, 0, "cpufreq", device_get_unit(cpu_dev));
	if (cf_dev == NULL)
		return (ENOMEM);
	device_quiet(cf_dev);

	error = device_probe_and_attach(cf_dev);
	if (error)
		return (error);

	sc = device_get_softc(cf_dev);
	sc->cf_drv_dev = dev;
	cpufreq_add_freq_driver_sysctl(cf_dev);
	return (error);
}

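/*
 * Called by hardware-specific drivers when they detach; removes the cpufreq
 * control device associated with this driver's CPU.
 */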
int
cpufreq_unregister(device_t dev)
{
	device_t cf_dev;
	struct cpufreq_softc *sc __diagused;

	/*
	 * Find the cpufreq child of our parent CPU device and remove it.
	 * The softc's cf_drv_dev records which hardware driver backs it,
	 * which must be the caller.
	 */
	cf_dev = device_find_child(device_get_parent(dev), "cpufreq", -1);
	if (cf_dev == NULL) {
		device_printf(dev,
	"warning: cpufreq_unregister called with no cpufreq device active\n");
		return (0);
	}
	sc = device_get_softc(cf_dev);
	MPASS(sc->cf_drv_dev == dev);
	device_delete_child(device_get_parent(cf_dev), cf_dev);

	return (0);
}

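/*
 * Called by hardware drivers when their set of settings changes.  Fires the
 * cpufreq_levels_changed eventhandler with the unit number of the parent
 * CPU device so in-kernel listeners can re-query the available levels.
 */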
int
cpufreq_settings_changed(device_t dev)
{

	EVENTHANDLER_INVOKE(cpufreq_levels_changed,
	    device_get_unit(device_get_parent(dev)));
	return (0);
}