/*-
 * Copyright (c) 2004-2007 Nate Lawson (SDG)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/sx.h>
#include <sys/timetc.h>
#include <sys/taskqueue.h>

#include "cpufreq_if.h"

/*
 * Common CPU frequency glue code.  Drivers for specific hardware can
 * attach this interface to allow users to get/set the CPU frequency.
 */

/*
 * Number of levels we can handle.  Levels are synthesized from settings
 * so for M settings and N drivers, there may be M*N levels.
 */
#define CF_MAX_LEVELS   64
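
/*
 * Illustration (added for clarity, not in the original source): with one
 * absolute driver exporting 8 P-states and one relative driver exporting
 * 8 throttle steps, up to 8 * 8 = 64 levels can be synthesized before
 * duplicate pruning, which is the kind of combination CF_MAX_LEVELS is
 * sized to accommodate.
 */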

struct cf_saved_freq {
        struct cf_level                 level;
        int                             priority;
        SLIST_ENTRY(cf_saved_freq)      link;
};

struct cpufreq_softc {
        struct sx                       lock;
        struct cf_level                 curr_level;
        int                             curr_priority;
        SLIST_HEAD(, cf_saved_freq)     saved_freq;
        struct cf_level_lst             all_levels;
        int                             all_count;
        int                             max_mhz;
        device_t                        dev;
        struct sysctl_ctx_list          sysctl_ctx;
        struct task                     startup_task;
        struct cf_level                 *levels_buf;
};

struct cf_setting_array {
        struct cf_setting               sets[MAX_SETTINGS];
        int                             count;
        TAILQ_ENTRY(cf_setting_array)   link;
};

TAILQ_HEAD(cf_setting_lst, cf_setting_array);

#define CF_MTX_INIT(x)          sx_init((x), "cpufreq lock")
#define CF_MTX_LOCK(x)          sx_xlock((x))
#define CF_MTX_UNLOCK(x)        sx_xunlock((x))
#define CF_MTX_ASSERT(x)        sx_assert((x), SX_XLOCKED)

#define CF_DEBUG(msg...)        do {            \
        if (cf_verbose)                         \
                printf("cpufreq: " msg);        \
} while (0)

static int      cpufreq_attach(device_t dev);
static void     cpufreq_startup_task(void *ctx, int pending);
static int      cpufreq_detach(device_t dev);
static int      cf_set_method(device_t dev, const struct cf_level *level,
                    int priority);
static int      cf_get_method(device_t dev, struct cf_level *level);
static int      cf_levels_method(device_t dev, struct cf_level *levels,
                    int *count);
static int      cpufreq_insert_abs(struct cpufreq_softc *sc,
                    struct cf_setting *sets, int count);
static int      cpufreq_expand_set(struct cpufreq_softc *sc,
                    struct cf_setting_array *set_arr);
static struct cf_level *cpufreq_dup_set(struct cpufreq_softc *sc,
                    struct cf_level *dup, struct cf_setting *set);
static int      cpufreq_curr_sysctl(SYSCTL_HANDLER_ARGS);
static int      cpufreq_levels_sysctl(SYSCTL_HANDLER_ARGS);
static int      cpufreq_settings_sysctl(SYSCTL_HANDLER_ARGS);

static device_method_t cpufreq_methods[] = {
        DEVMETHOD(device_probe,         bus_generic_probe),
        DEVMETHOD(device_attach,        cpufreq_attach),
        DEVMETHOD(device_detach,        cpufreq_detach),

        DEVMETHOD(cpufreq_set,          cf_set_method),
        DEVMETHOD(cpufreq_get,          cf_get_method),
        DEVMETHOD(cpufreq_levels,       cf_levels_method),
        {0, 0}
};
static driver_t cpufreq_driver = {
        "cpufreq", cpufreq_methods, sizeof(struct cpufreq_softc)
};
static devclass_t cpufreq_dc;
DRIVER_MODULE(cpufreq, cpu, cpufreq_driver, cpufreq_dc, 0, 0);

static int      cf_lowest_freq;
static int      cf_verbose;
TUNABLE_INT("debug.cpufreq.lowest", &cf_lowest_freq);
TUNABLE_INT("debug.cpufreq.verbose", &cf_verbose);
static SYSCTL_NODE(_debug, OID_AUTO, cpufreq, CTLFLAG_RD, NULL,
    "cpufreq debugging");
SYSCTL_INT(_debug_cpufreq, OID_AUTO, lowest, CTLFLAG_RW, &cf_lowest_freq, 1,
    "Don't provide levels below this frequency.");
SYSCTL_INT(_debug_cpufreq, OID_AUTO, verbose, CTLFLAG_RW, &cf_verbose, 1,
    "Print verbose debugging messages");
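
/*
 * Usage note (illustrative, not part of the original source): both knobs
 * above are also available as loader tunables, e.g. in loader.conf(5):
 *
 *      debug.cpufreq.lowest="800"      # hide levels below 800 MHz
 *      debug.cpufreq.verbose="1"       # enable CF_DEBUG messages
 *
 * or at runtime via sysctl(8), e.g. "sysctl debug.cpufreq.verbose=1".
 */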

static int
cpufreq_attach(device_t dev)
{
        struct cpufreq_softc *sc;
        struct pcpu *pc;
        device_t parent;
        uint64_t rate;
        int numdevs;

        CF_DEBUG("initializing %s\n", device_get_nameunit(dev));
        sc = device_get_softc(dev);
        parent = device_get_parent(dev);
        sc->dev = dev;
        sysctl_ctx_init(&sc->sysctl_ctx);
        TAILQ_INIT(&sc->all_levels);
        CF_MTX_INIT(&sc->lock);
        sc->curr_level.total_set.freq = CPUFREQ_VAL_UNKNOWN;
        SLIST_INIT(&sc->saved_freq);
        /* Try to get nominal CPU freq to use it as maximum later if needed */
        sc->max_mhz = cpu_get_nominal_mhz(dev);
        /* If that fails, try to measure the current rate */
        if (sc->max_mhz <= 0) {
                pc = cpu_get_pcpu(dev);
                if (cpu_est_clockrate(pc->pc_cpuid, &rate) == 0)
                        sc->max_mhz = rate / 1000000;
                else
                        sc->max_mhz = CPUFREQ_VAL_UNKNOWN;
        }

        /*
         * Only initialize one set of sysctls for all CPUs.  In the future,
         * if multiple CPUs can have different settings, we can move these
         * sysctls to be under every CPU instead of just the first one.
         */
        numdevs = devclass_get_count(cpufreq_dc);
        if (numdevs > 1)
                return (0);

        CF_DEBUG("initializing one-time data for %s\n",
            device_get_nameunit(dev));
        sc->levels_buf = malloc(CF_MAX_LEVELS * sizeof(*sc->levels_buf),
            M_DEVBUF, M_WAITOK);
        SYSCTL_ADD_PROC(&sc->sysctl_ctx,
            SYSCTL_CHILDREN(device_get_sysctl_tree(parent)),
            OID_AUTO, "freq", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
            cpufreq_curr_sysctl, "I", "Current CPU frequency");
        SYSCTL_ADD_PROC(&sc->sysctl_ctx,
            SYSCTL_CHILDREN(device_get_sysctl_tree(parent)),
            OID_AUTO, "freq_levels", CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
            cpufreq_levels_sysctl, "A", "CPU frequency levels");

        /*
         * Queue a one-shot broadcast that levels have changed.
         * It will run once the system has completed booting.
         */
        TASK_INIT(&sc->startup_task, 0, cpufreq_startup_task, dev);
        taskqueue_enqueue(taskqueue_thread, &sc->startup_task);

        return (0);
}

/* Handle any work to be done for all drivers that attached during boot. */
static void
cpufreq_startup_task(void *ctx, int pending)
{

        cpufreq_settings_changed((device_t)ctx);
}

static int
cpufreq_detach(device_t dev)
{
        struct cpufreq_softc *sc;
        struct cf_saved_freq *saved_freq;
        int numdevs;

        CF_DEBUG("shutdown %s\n", device_get_nameunit(dev));
        sc = device_get_softc(dev);
        sysctl_ctx_free(&sc->sysctl_ctx);

        while ((saved_freq = SLIST_FIRST(&sc->saved_freq)) != NULL) {
                SLIST_REMOVE_HEAD(&sc->saved_freq, link);
                free(saved_freq, M_TEMP);
        }

        /* Only clean up these resources when the last device is detaching. */
        numdevs = devclass_get_count(cpufreq_dc);
        if (numdevs == 1) {
                CF_DEBUG("final shutdown for %s\n", device_get_nameunit(dev));
                free(sc->levels_buf, M_DEVBUF);
        }

        return (0);
}
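
/*
 * Illustrative note (added for clarity, not in the original source):
 * levels are set with a priority, and a higher-priority request saves the
 * level it displaces on sc->saved_freq.  For example, if a user selects a
 * level at CPUFREQ_PRIO_USER and a kernel consumer then requests a
 * different level at a higher priority, the user's level is pushed onto
 * the saved list; when that consumer later calls this method with
 * level == NULL, the saved user level and priority are restored.
 */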

static int
cf_set_method(device_t dev, const struct cf_level *level, int priority)
{
        struct cpufreq_softc *sc;
        const struct cf_setting *set;
        struct cf_saved_freq *saved_freq, *curr_freq;
        struct pcpu *pc;
        int error, i;

        sc = device_get_softc(dev);
        error = 0;
        set = NULL;
        saved_freq = NULL;

        /* We are going to change levels so notify the pre-change handler. */
        EVENTHANDLER_INVOKE(cpufreq_pre_change, level, &error);
        if (error != 0) {
                EVENTHANDLER_INVOKE(cpufreq_post_change, level, error);
                return (error);
        }

        CF_MTX_LOCK(&sc->lock);

#ifdef SMP
        /*
         * If still booting and secondary CPUs are not started yet, don't
         * allow changing the frequency until they're online.  This is
         * because we can't switch to them using sched_bind() and thus we'd
         * only be switching the main CPU.  XXXTODO: Need to think more
         * about how to handle having different CPUs at different
         * frequencies.
         */
        if (mp_ncpus > 1 && !smp_active) {
                device_printf(dev, "rejecting change, SMP not started yet\n");
                error = ENXIO;
                goto out;
        }
#endif /* SMP */

        /*
         * If the requested level has a lower priority, don't allow
         * the new level right now.
         */
        if (priority < sc->curr_priority) {
                CF_DEBUG("ignoring, curr prio %d less than %d\n", priority,
                    sc->curr_priority);
                error = EPERM;
                goto out;
        }

        /*
         * If the caller didn't specify a level and one is saved, prepare to
         * restore the saved level.  If none has been saved, return an error.
         */
        if (level == NULL) {
                saved_freq = SLIST_FIRST(&sc->saved_freq);
                if (saved_freq == NULL) {
                        CF_DEBUG("NULL level, no saved level\n");
                        error = ENXIO;
                        goto out;
                }
                level = &saved_freq->level;
                priority = saved_freq->priority;
                CF_DEBUG("restoring saved level, freq %d prio %d\n",
                    level->total_set.freq, priority);
        }

        /* Reject levels that are below our specified threshold. */
        if (level->total_set.freq < cf_lowest_freq) {
                CF_DEBUG("rejecting freq %d, less than %d limit\n",
                    level->total_set.freq, cf_lowest_freq);
                error = EINVAL;
                goto out;
        }

        /* If already at this level, just return. */
        if (CPUFREQ_CMP(sc->curr_level.total_set.freq, level->total_set.freq)) {
                CF_DEBUG("skipping freq %d, same as current level %d\n",
                    level->total_set.freq, sc->curr_level.total_set.freq);
                goto skip;
        }

        /* First, set the absolute frequency via its driver. */
        set = &level->abs_set;
        if (set->dev) {
                if (!device_is_attached(set->dev)) {
                        error = ENXIO;
                        goto out;
                }

                /* Bind to the target CPU before switching. */
                pc = cpu_get_pcpu(set->dev);
                thread_lock(curthread);
                sched_bind(curthread, pc->pc_cpuid);
                thread_unlock(curthread);
                CF_DEBUG("setting abs freq %d on %s (cpu %d)\n", set->freq,
                    device_get_nameunit(set->dev), PCPU_GET(cpuid));
                error = CPUFREQ_DRV_SET(set->dev, set);
                thread_lock(curthread);
                sched_unbind(curthread);
                thread_unlock(curthread);
                if (error) {
                        goto out;
                }
        }

        /* Next, set any/all relative frequencies via their drivers. */
        for (i = 0; i < level->rel_count; i++) {
                set = &level->rel_set[i];
                if (!device_is_attached(set->dev)) {
                        error = ENXIO;
                        goto out;
                }

                /* Bind to the target CPU before switching. */
                pc = cpu_get_pcpu(set->dev);
                thread_lock(curthread);
                sched_bind(curthread, pc->pc_cpuid);
                thread_unlock(curthread);
                CF_DEBUG("setting rel freq %d on %s (cpu %d)\n", set->freq,
                    device_get_nameunit(set->dev), PCPU_GET(cpuid));
                error = CPUFREQ_DRV_SET(set->dev, set);
                thread_lock(curthread);
                sched_unbind(curthread);
                thread_unlock(curthread);
                if (error) {
                        /* XXX Back out any successful setting? */
                        goto out;
                }
        }

skip:
        /*
         * Before recording the current level, check if we're going to a
         * higher priority.  If so, save the previous level and priority.
         */
        if (sc->curr_level.total_set.freq != CPUFREQ_VAL_UNKNOWN &&
            priority > sc->curr_priority) {
                CF_DEBUG("saving level, freq %d prio %d\n",
                    sc->curr_level.total_set.freq, sc->curr_priority);
                curr_freq = malloc(sizeof(*curr_freq), M_TEMP, M_NOWAIT);
                if (curr_freq == NULL) {
                        error = ENOMEM;
                        goto out;
                }
                curr_freq->level = sc->curr_level;
                curr_freq->priority = sc->curr_priority;
                SLIST_INSERT_HEAD(&sc->saved_freq, curr_freq, link);
        }
        sc->curr_level = *level;
        sc->curr_priority = priority;

        /* If we were restoring a saved state, reset it to "unused". */
        if (saved_freq != NULL) {
                CF_DEBUG("resetting saved level\n");
                sc->curr_level.total_set.freq = CPUFREQ_VAL_UNKNOWN;
                SLIST_REMOVE_HEAD(&sc->saved_freq, link);
                free(saved_freq, M_TEMP);
        }

out:
        CF_MTX_UNLOCK(&sc->lock);

        /*
         * We changed levels (or attempted to) so notify the post-change
         * handler of new frequency or error.
         */
        EVENTHANDLER_INVOKE(cpufreq_post_change, level, error);
        if (error && set)
                device_printf(set->dev, "set freq failed, err %d\n", error);

        return (error);
}
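
/*
 * Descriptive note (added for clarity, not in the original source):
 * cf_get_method() resolves the current level in three steps: return the
 * cached level if it is known, otherwise poll each driver's current
 * setting and match it against the synthesized levels, and finally fall
 * back to estimating the clock rate with cpu_est_clockrate() and matching
 * that estimate against a level.
 */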

static int
cf_get_method(device_t dev, struct cf_level *level)
{
        struct cpufreq_softc *sc;
        struct cf_level *levels;
        struct cf_setting *curr_set, set;
        struct pcpu *pc;
        device_t *devs;
        int count, error, i, n, numdevs;
        uint64_t rate;

        sc = device_get_softc(dev);
        error = 0;
        levels = NULL;

        /* If we already know the current frequency, we're done. */
        CF_MTX_LOCK(&sc->lock);
        curr_set = &sc->curr_level.total_set;
        if (curr_set->freq != CPUFREQ_VAL_UNKNOWN) {
                CF_DEBUG("get returning known freq %d\n", curr_set->freq);
                goto out;
        }
        CF_MTX_UNLOCK(&sc->lock);

        /*
         * We need to figure out the current level.  Loop through every
         * driver, getting the current setting.  Then, attempt to get a best
         * match of settings against each level.
         */
        count = CF_MAX_LEVELS;
        levels = malloc(count * sizeof(*levels), M_TEMP, M_NOWAIT);
        if (levels == NULL)
                return (ENOMEM);
        error = CPUFREQ_LEVELS(sc->dev, levels, &count);
        if (error) {
                if (error == E2BIG)
                        printf("cpufreq: need to increase CF_MAX_LEVELS\n");
                free(levels, M_TEMP);
                return (error);
        }
        error = device_get_children(device_get_parent(dev), &devs, &numdevs);
        if (error) {
                free(levels, M_TEMP);
                return (error);
        }

        /*
         * Reacquire the lock and search for the given level.
         *
         * XXX Note: this is not quite right since we really need to go
         * through each level and compare both absolute and relative
         * settings for each driver in the system before making a match.
         * The estimation code below catches this case though.
         */
        CF_MTX_LOCK(&sc->lock);
        for (n = 0; n < numdevs && curr_set->freq == CPUFREQ_VAL_UNKNOWN; n++) {
                if (!device_is_attached(devs[n]))
                        continue;
                if (CPUFREQ_DRV_GET(devs[n], &set) != 0)
                        continue;
                for (i = 0; i < count; i++) {
                        if (CPUFREQ_CMP(set.freq, levels[i].total_set.freq)) {
                                sc->curr_level = levels[i];
                                break;
                        }
                }
        }
        free(devs, M_TEMP);
        if (curr_set->freq != CPUFREQ_VAL_UNKNOWN) {
                CF_DEBUG("get matched freq %d from drivers\n", curr_set->freq);
                goto out;
        }

        /*
         * We couldn't find an exact match, so attempt to estimate and then
         * match against a level.
         */
        pc = cpu_get_pcpu(dev);
        if (pc == NULL) {
                error = ENXIO;
                goto out;
        }
        cpu_est_clockrate(pc->pc_cpuid, &rate);
        rate /= 1000000;
        for (i = 0; i < count; i++) {
                if (CPUFREQ_CMP(rate, levels[i].total_set.freq)) {
                        sc->curr_level = levels[i];
                        CF_DEBUG("get estimated freq %d\n", curr_set->freq);
                        goto out;
                }
        }
        error = ENXIO;

out:
        if (error == 0)
                *level = sc->curr_level;

        CF_MTX_UNLOCK(&sc->lock);
        if (levels)
                free(levels, M_TEMP);
        return (error);
}
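
/*
 * Descriptive note (added for clarity, not in the original source):
 * cf_levels_method() rebuilds the level list on every call: it collects
 * settings from each attached driver, inserts absolute settings as levels
 * sorted by descending frequency, expands relative settings against those
 * levels, and then copies out the result, dropping near-duplicate and
 * below-threshold frequencies.
 */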

static int
cf_levels_method(device_t dev, struct cf_level *levels, int *count)
{
        struct cf_setting_array *set_arr;
        struct cf_setting_lst rel_sets;
        struct cpufreq_softc *sc;
        struct cf_level *lev;
        struct cf_setting *sets;
        struct pcpu *pc;
        device_t *devs;
        int error, i, numdevs, set_count, type;
        uint64_t rate;

        if (levels == NULL || count == NULL)
                return (EINVAL);

        TAILQ_INIT(&rel_sets);
        sc = device_get_softc(dev);
        error = device_get_children(device_get_parent(dev), &devs, &numdevs);
        if (error)
                return (error);
        sets = malloc(MAX_SETTINGS * sizeof(*sets), M_TEMP, M_NOWAIT);
        if (sets == NULL) {
                free(devs, M_TEMP);
                return (ENOMEM);
        }

        /* Get settings from all cpufreq drivers. */
        CF_MTX_LOCK(&sc->lock);
        for (i = 0; i < numdevs; i++) {
                /* Skip devices that aren't ready. */
                if (!device_is_attached(devs[i]))
                        continue;

                /*
                 * Get settings, skipping drivers that offer no settings or
                 * provide settings for informational purposes only.
                 */
                error = CPUFREQ_DRV_TYPE(devs[i], &type);
                if (error || (type & CPUFREQ_FLAG_INFO_ONLY)) {
                        if (error == 0) {
                                CF_DEBUG("skipping info-only driver %s\n",
                                    device_get_nameunit(devs[i]));
                        }
                        continue;
                }
                set_count = MAX_SETTINGS;
                error = CPUFREQ_DRV_SETTINGS(devs[i], sets, &set_count);
                if (error || set_count == 0)
                        continue;

                /* Add the settings to our absolute/relative lists. */
                switch (type & CPUFREQ_TYPE_MASK) {
                case CPUFREQ_TYPE_ABSOLUTE:
                        error = cpufreq_insert_abs(sc, sets, set_count);
                        break;
                case CPUFREQ_TYPE_RELATIVE:
                        CF_DEBUG("adding %d relative settings\n", set_count);
                        set_arr = malloc(sizeof(*set_arr), M_TEMP, M_NOWAIT);
                        if (set_arr == NULL) {
                                error = ENOMEM;
                                goto out;
                        }
                        bcopy(sets, set_arr->sets, set_count * sizeof(*sets));
                        set_arr->count = set_count;
                        TAILQ_INSERT_TAIL(&rel_sets, set_arr, link);
                        break;
                default:
                        error = EINVAL;
                }
                if (error)
                        goto out;
        }

        /*
         * If there are no absolute levels, create a fake one at 100%.  We
         * then cache the clockrate for later use as our base frequency.
         */
        if (TAILQ_EMPTY(&sc->all_levels)) {
                if (sc->max_mhz == CPUFREQ_VAL_UNKNOWN) {
                        sc->max_mhz = cpu_get_nominal_mhz(dev);
                        /*
                         * If the CPU can't report a rate for 100%, hope
                         * the CPU is running at its nominal rate right now,
                         * and use that instead.
                         */
                        if (sc->max_mhz <= 0) {
                                pc = cpu_get_pcpu(dev);
                                cpu_est_clockrate(pc->pc_cpuid, &rate);
                                sc->max_mhz = rate / 1000000;
                        }
                }
                memset(&sets[0], CPUFREQ_VAL_UNKNOWN, sizeof(*sets));
                sets[0].freq = sc->max_mhz;
                sets[0].dev = NULL;
                error = cpufreq_insert_abs(sc, sets, 1);
                if (error)
                        goto out;
        }

        /* Create a combined list of absolute + relative levels. */
        TAILQ_FOREACH(set_arr, &rel_sets, link)
                cpufreq_expand_set(sc, set_arr);

        /* If the caller doesn't have enough space, return the actual count. */
        if (sc->all_count > *count) {
                *count = sc->all_count;
                error = E2BIG;
                goto out;
        }

        /* Finally, output the list of levels. */
        i = 0;
        TAILQ_FOREACH(lev, &sc->all_levels, link) {
                /*
                 * Skip levels that are too close in frequency to the
                 * previous levels.  Some systems report bogus duplicate
                 * settings (e.g., for acpi_perf).
                 */
                if (i > 0 && CPUFREQ_CMP(lev->total_set.freq,
                    levels[i - 1].total_set.freq)) {
                        sc->all_count--;
                        continue;
                }

                /* Skip levels that have a frequency that is too low. */
                if (lev->total_set.freq < cf_lowest_freq) {
                        sc->all_count--;
                        continue;
                }

                levels[i] = *lev;
                i++;
        }
        *count = sc->all_count;
        error = 0;

out:
        /* Clear all levels since we regenerate them each time. */
        while ((lev = TAILQ_FIRST(&sc->all_levels)) != NULL) {
                TAILQ_REMOVE(&sc->all_levels, lev, link);
                free(lev, M_TEMP);
        }
        sc->all_count = 0;

        CF_MTX_UNLOCK(&sc->lock);
        while ((set_arr = TAILQ_FIRST(&rel_sets)) != NULL) {
                TAILQ_REMOVE(&rel_sets, set_arr, link);
                free(set_arr, M_TEMP);
        }
        free(devs, M_TEMP);
        free(sets, M_TEMP);
        return (error);
}

/*
 * Create levels for an array of absolute settings and insert them in
 * sorted order in the specified list.
 */
static int
cpufreq_insert_abs(struct cpufreq_softc *sc, struct cf_setting *sets,
    int count)
{
        struct cf_level_lst *list;
        struct cf_level *level, *search;
        int i;

        CF_MTX_ASSERT(&sc->lock);

        list = &sc->all_levels;
        for (i = 0; i < count; i++) {
                level = malloc(sizeof(*level), M_TEMP, M_NOWAIT | M_ZERO);
                if (level == NULL)
                        return (ENOMEM);
                level->abs_set = sets[i];
                level->total_set = sets[i];
                level->total_set.dev = NULL;
                sc->all_count++;

                if (TAILQ_EMPTY(list)) {
                        CF_DEBUG("adding abs setting %d at head\n",
                            sets[i].freq);
                        TAILQ_INSERT_HEAD(list, level, link);
                        continue;
                }

                TAILQ_FOREACH_REVERSE(search, list, cf_level_lst, link) {
                        if (sets[i].freq <= search->total_set.freq) {
                                CF_DEBUG("adding abs setting %d after %d\n",
                                    sets[i].freq, search->total_set.freq);
                                TAILQ_INSERT_AFTER(list, search, level, link);
                                break;
                        }
                }
        }
        return (0);
}
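
/*
 * Illustrative note (added for clarity, not in the original source):
 * relative settings are expressed in hundredths of a percent, so 10000
 * means 100% and 7500 means 75%.  Expanding a 2000 MHz absolute level with
 * a 7500 relative setting therefore derives a 1500 MHz level, which is the
 * arithmetic carried out by cpufreq_dup_set() below.
 */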

/*
 * Expand a group of relative settings, creating derived levels from them.
 */
static int
cpufreq_expand_set(struct cpufreq_softc *sc, struct cf_setting_array *set_arr)
{
        struct cf_level *fill, *search;
        struct cf_setting *set;
        int i;

        CF_MTX_ASSERT(&sc->lock);

        /*
         * Walk the set of all existing levels in reverse.  This is so we
         * create derived states from the lowest absolute settings first
         * and discard duplicates created from higher absolute settings.
         * For instance, a level of 50 MHz derived from 100 MHz + 50% is
         * preferable to 200 MHz + 25% because absolute settings are more
         * efficient since they often change the voltage as well.
         */
        TAILQ_FOREACH_REVERSE(search, &sc->all_levels, cf_level_lst, link) {
                /* Add each setting to the level, duplicating if necessary. */
                for (i = 0; i < set_arr->count; i++) {
                        set = &set_arr->sets[i];

                        /*
                         * If this setting is less than 100%, split the level
                         * into two and add this setting to the new level.
                         */
                        fill = search;
                        if (set->freq < 10000) {
                                fill = cpufreq_dup_set(sc, search, set);

                                /*
                                 * The new level was a duplicate of an existing
                                 * level or its absolute setting is too high
                                 * so we freed it.  For example, we discard a
                                 * derived level of 1000 MHz/25% if a level
                                 * of 500 MHz/100% already exists.
                                 */
                                if (fill == NULL)
                                        break;
                        }

                        /* Add this setting to the existing or new level. */
                        KASSERT(fill->rel_count < MAX_SETTINGS,
                            ("cpufreq: too many relative drivers (%d)",
                            MAX_SETTINGS));
                        fill->rel_set[fill->rel_count] = *set;
                        fill->rel_count++;
                        CF_DEBUG(
                        "expand set added rel setting %d%% to %d level\n",
                            set->freq / 100, fill->total_set.freq);
                }
        }

        return (0);
}
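
/*
 * Descriptive note (added for clarity, not in the original source):
 * cpufreq_dup_set() copies an existing level, scales its total frequency
 * and power by the relative setting's fraction, adds the setting's latency,
 * and inserts the result into the sorted level list.  It returns NULL when
 * the derived level is rejected as a duplicate or as less efficient than an
 * existing level, in which case the caller stops expanding the current
 * level.
 */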

static struct cf_level *
cpufreq_dup_set(struct cpufreq_softc *sc, struct cf_level *dup,
    struct cf_setting *set)
{
        struct cf_level_lst *list;
        struct cf_level *fill, *itr;
        struct cf_setting *fill_set, *itr_set;
        int i;

        CF_MTX_ASSERT(&sc->lock);

        /*
         * Create a new level, copy it from the old one, and update the
         * total frequency and power by the percentage specified in the
         * relative setting.
         */
        fill = malloc(sizeof(*fill), M_TEMP, M_NOWAIT);
        if (fill == NULL)
                return (NULL);
        *fill = *dup;
        fill_set = &fill->total_set;
        fill_set->freq =
            ((uint64_t)fill_set->freq * set->freq) / 10000;
        if (fill_set->power != CPUFREQ_VAL_UNKNOWN) {
                fill_set->power = ((uint64_t)fill_set->power * set->freq)
                    / 10000;
        }
        if (set->lat != CPUFREQ_VAL_UNKNOWN) {
                if (fill_set->lat != CPUFREQ_VAL_UNKNOWN)
                        fill_set->lat += set->lat;
                else
                        fill_set->lat = set->lat;
        }
        CF_DEBUG("dup set considering derived setting %d\n", fill_set->freq);

        /*
         * If we copied an old level that we already modified (say, at 100%),
         * we need to remove that setting before adding this one.  Since we
         * process each setting array in order, we know any settings for this
         * driver will be found at the end.
         */
        for (i = fill->rel_count; i != 0; i--) {
                if (fill->rel_set[i - 1].dev != set->dev)
                        break;
                CF_DEBUG("removed last relative driver: %s\n",
                    device_get_nameunit(set->dev));
                fill->rel_count--;
        }

        /*
         * Insert the new level in sorted order.  If it is a duplicate of an
         * existing level (1) or has an absolute setting higher than the
         * existing level (2), do not add it.  We can do this since any such
         * level is guaranteed to use less power.  For example (1), a level
         * with one absolute setting of 800 MHz uses less power than one
         * composed of an absolute setting of 1600 MHz and a relative setting
         * at 50%.  Also for example (2), a level of 800 MHz/75% is
         * preferable to 1600 MHz/25% even though the latter has a lower
         * total frequency.
         */
        list = &sc->all_levels;
        KASSERT(!TAILQ_EMPTY(list), ("all levels list empty in dup set"));
        TAILQ_FOREACH_REVERSE(itr, list, cf_level_lst, link) {
                itr_set = &itr->total_set;
                if (CPUFREQ_CMP(fill_set->freq, itr_set->freq)) {
                        CF_DEBUG("dup set rejecting %d (dupe)\n",
                            fill_set->freq);
                        itr = NULL;
                        break;
                } else if (fill_set->freq < itr_set->freq) {
                        if (fill->abs_set.freq <= itr->abs_set.freq) {
                                CF_DEBUG(
                                "dup done, inserting new level %d after %d\n",
                                    fill_set->freq, itr_set->freq);
                                TAILQ_INSERT_AFTER(list, itr, fill, link);
                                sc->all_count++;
                        } else {
                                CF_DEBUG("dup set rejecting %d (abs too big)\n",
                                    fill_set->freq);
                                itr = NULL;
                        }
                        break;
                }
        }

        /* We didn't find a good place for this new level so free it. */
        if (itr == NULL) {
                CF_DEBUG("dup set freeing new level %d (not optimal)\n",
                    fill_set->freq);
                free(fill, M_TEMP);
                fill = NULL;
        }

        return (fill);
}
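
/*
 * Usage note (illustrative, not part of the original source): this handler
 * backs the integer "freq" sysctl attached under each cpu device, e.g.
 *
 *      sysctl dev.cpu.0.freq           # report the current level, in MHz
 *      sysctl dev.cpu.0.freq=1200      # request the level matching 1200 MHz
 *
 * A write is applied to every cpufreq device so all CPUs switch together.
 */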

static int
cpufreq_curr_sysctl(SYSCTL_HANDLER_ARGS)
{
        struct cpufreq_softc *sc;
        struct cf_level *levels;
        int count, devcount, error, freq, i, n;
        device_t *devs;

        devs = NULL;
        sc = oidp->oid_arg1;
        levels = sc->levels_buf;

        error = CPUFREQ_GET(sc->dev, &levels[0]);
        if (error)
                goto out;
        freq = levels[0].total_set.freq;
        error = sysctl_handle_int(oidp, &freq, 0, req);
        if (error != 0 || req->newptr == NULL)
                goto out;

        /*
         * While we only call cpufreq_get() on one device (assuming all
         * CPUs have equal levels), we call cpufreq_set() on all CPUs.
         * This is needed for some MP systems.
         */
        error = devclass_get_devices(cpufreq_dc, &devs, &devcount);
        if (error)
                goto out;
        for (n = 0; n < devcount; n++) {
                count = CF_MAX_LEVELS;
                error = CPUFREQ_LEVELS(devs[n], levels, &count);
                if (error) {
                        if (error == E2BIG)
                                printf(
                                "cpufreq: need to increase CF_MAX_LEVELS\n");
                        break;
                }
                for (i = 0; i < count; i++) {
                        if (CPUFREQ_CMP(levels[i].total_set.freq, freq)) {
                                error = CPUFREQ_SET(devs[n], &levels[i],
                                    CPUFREQ_PRIO_USER);
                                break;
                        }
                }
                if (i == count) {
                        error = EINVAL;
                        break;
                }
        }

out:
        if (devs)
                free(devs, M_TEMP);
        return (error);
}
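
/*
 * Usage note (illustrative, not part of the original source): this handler
 * backs the read-only "freq_levels" string sysctl and reports each level as
 * a space-separated "frequency/power" pair, e.g.
 *
 *      dev.cpu.0.freq_levels: 2400/95000 2200/87000 1800/-1
 *
 * where a negative power value typically means the driver did not report
 * power use (CPUFREQ_VAL_UNKNOWN).
 */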

static int
cpufreq_levels_sysctl(SYSCTL_HANDLER_ARGS)
{
        struct cpufreq_softc *sc;
        struct cf_level *levels;
        struct cf_setting *set;
        struct sbuf sb;
        int count, error, i;

        sc = oidp->oid_arg1;
        sbuf_new(&sb, NULL, 128, SBUF_AUTOEXTEND);

        /* Get settings from the device and generate the output string. */
        count = CF_MAX_LEVELS;
        levels = sc->levels_buf;
        if (levels == NULL) {
                sbuf_delete(&sb);
                return (ENOMEM);
        }
        error = CPUFREQ_LEVELS(sc->dev, levels, &count);
        if (error) {
                if (error == E2BIG)
                        printf("cpufreq: need to increase CF_MAX_LEVELS\n");
                goto out;
        }
        if (count) {
                for (i = 0; i < count; i++) {
                        set = &levels[i].total_set;
                        sbuf_printf(&sb, "%d/%d ", set->freq, set->power);
                }
        } else
                sbuf_cpy(&sb, "0");
        sbuf_trim(&sb);
        sbuf_finish(&sb);
        error = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);

out:
        sbuf_delete(&sb);
        return (error);
}

static int
cpufreq_settings_sysctl(SYSCTL_HANDLER_ARGS)
{
        device_t dev;
        struct cf_setting *sets;
        struct sbuf sb;
        int error, i, set_count;

        dev = oidp->oid_arg1;
        sbuf_new(&sb, NULL, 128, SBUF_AUTOEXTEND);

        /* Get settings from the device and generate the output string. */
        set_count = MAX_SETTINGS;
        sets = malloc(set_count * sizeof(*sets), M_TEMP, M_NOWAIT);
        if (sets == NULL) {
                sbuf_delete(&sb);
                return (ENOMEM);
        }
        error = CPUFREQ_DRV_SETTINGS(dev, sets, &set_count);
        if (error)
                goto out;
        if (set_count) {
                for (i = 0; i < set_count; i++)
                        sbuf_printf(&sb, "%d/%d ", sets[i].freq, sets[i].power);
        } else
                sbuf_cpy(&sb, "0");
        sbuf_trim(&sb);
        sbuf_finish(&sb);
        error = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);

out:
        free(sets, M_TEMP);
        sbuf_delete(&sb);
        return (error);
}
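
/*
 * Descriptive note (added for clarity, not in the original source):
 * hardware-specific frequency drivers call cpufreq_register() from their
 * attach routines; on FreeBSD these include drivers such as acpi_perf,
 * est, and p4tcc (see cpufreq(4)).  The first registration on a CPU
 * creates the per-CPU "cpufreq" control device that implements the glue
 * methods above.
 */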

int
cpufreq_register(device_t dev)
{
        struct cpufreq_softc *sc;
        device_t cf_dev, cpu_dev;

        /* Add a sysctl to get each driver's settings separately. */
        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
            SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
            OID_AUTO, "freq_settings", CTLTYPE_STRING | CTLFLAG_RD, dev, 0,
            cpufreq_settings_sysctl, "A", "CPU frequency driver settings");

        /*
         * Add only one cpufreq device to each CPU.  Currently, all CPUs
         * must offer the same levels and be switched at the same time.
         */
        cpu_dev = device_get_parent(dev);
        if ((cf_dev = device_find_child(cpu_dev, "cpufreq", -1))) {
                sc = device_get_softc(cf_dev);
                sc->max_mhz = CPUFREQ_VAL_UNKNOWN;
                return (0);
        }

        /* Add the child device and possibly sysctls. */
        cf_dev = BUS_ADD_CHILD(cpu_dev, 0, "cpufreq", -1);
        if (cf_dev == NULL)
                return (ENOMEM);
        device_quiet(cf_dev);

        return (device_probe_and_attach(cf_dev));
}

int
cpufreq_unregister(device_t dev)
{
        device_t cf_dev, *devs;
        int cfcount, devcount, error, i, type;

        /*
         * If this is the last cpufreq child device, remove the control
         * device as well.  We identify cpufreq children by calling a method
         * they support.
         */
        error = device_get_children(device_get_parent(dev), &devs, &devcount);
        if (error)
                return (error);
        cf_dev = device_find_child(device_get_parent(dev), "cpufreq", -1);
        if (cf_dev == NULL) {
                device_printf(dev,
                    "warning: cpufreq_unregister called with no cpufreq device active\n");
                free(devs, M_TEMP);
                return (0);
        }
        cfcount = 0;
        for (i = 0; i < devcount; i++) {
                if (!device_is_attached(devs[i]))
                        continue;
                if (CPUFREQ_DRV_TYPE(devs[i], &type) == 0)
                        cfcount++;
        }
        if (cfcount <= 1)
                device_delete_child(device_get_parent(cf_dev), cf_dev);
        free(devs, M_TEMP);

        return (0);
}

int
cpufreq_settings_changed(device_t dev)
{

        EVENTHANDLER_INVOKE(cpufreq_levels_changed,
            device_get_unit(device_get_parent(dev)));
        return (0);
}