/*-
 * Copyright (c) 2004-2007 Nate Lawson (SDG)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/sbuf.h>
#include <sys/sx.h>
#include <sys/timetc.h>
#include <sys/taskqueue.h>

#include "cpufreq_if.h"

/*
 * Common CPU frequency glue code.  Drivers for specific hardware can
 * attach this interface to allow users to get/set the CPU frequency.
 */

/*
 * Number of levels we can handle.  Levels are synthesized from settings
 * so for M settings and N drivers, there may be M*N levels.
 */
#define CF_MAX_LEVELS   64

struct cf_saved_freq {
        struct cf_level                 level;
        int                             priority;
        SLIST_ENTRY(cf_saved_freq)      link;
};

struct cpufreq_softc {
        struct sx                       lock;
        struct cf_level                 curr_level;
        int                             curr_priority;
        SLIST_HEAD(, cf_saved_freq)     saved_freq;
        struct cf_level_lst             all_levels;
        int                             all_count;
        int                             max_mhz;
        device_t                        dev;
        struct sysctl_ctx_list          sysctl_ctx;
        struct task                     startup_task;
};

struct cf_setting_array {
        struct cf_setting               sets[MAX_SETTINGS];
        int                             count;
        TAILQ_ENTRY(cf_setting_array)   link;
};

TAILQ_HEAD(cf_setting_lst, cf_setting_array);

#define CF_MTX_INIT(x)          sx_init((x), "cpufreq lock")
#define CF_MTX_LOCK(x)          sx_xlock((x))
#define CF_MTX_UNLOCK(x)        sx_xunlock((x))
#define CF_MTX_ASSERT(x)        sx_assert((x), SX_XLOCKED)
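
/*
 * Note that the CF_MTX_* wrappers above are implemented with an sx(9)
 * lock rather than a mutex, so the driver methods invoked while it is
 * held (e.g. CPUFREQ_DRV_SET below) are permitted to sleep.
 */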

#define CF_DEBUG(msg...) do {                   \
        if (cf_verbose)                         \
                printf("cpufreq: " msg);        \
} while (0)

static int      cpufreq_attach(device_t dev);
static void     cpufreq_startup_task(void *ctx, int pending);
static int      cpufreq_detach(device_t dev);
static int      cf_set_method(device_t dev, const struct cf_level *level,
                    int priority);
static int      cf_get_method(device_t dev, struct cf_level *level);
static int      cf_levels_method(device_t dev, struct cf_level *levels,
                    int *count);
static int      cpufreq_insert_abs(struct cpufreq_softc *sc,
                    struct cf_setting *sets, int count);
static int      cpufreq_expand_set(struct cpufreq_softc *sc,
                    struct cf_setting_array *set_arr);
static struct cf_level *cpufreq_dup_set(struct cpufreq_softc *sc,
                    struct cf_level *dup, struct cf_setting *set);
static int      cpufreq_curr_sysctl(SYSCTL_HANDLER_ARGS);
static int      cpufreq_levels_sysctl(SYSCTL_HANDLER_ARGS);
static int      cpufreq_settings_sysctl(SYSCTL_HANDLER_ARGS);

static device_method_t cpufreq_methods[] = {
        DEVMETHOD(device_probe,         bus_generic_probe),
        DEVMETHOD(device_attach,        cpufreq_attach),
        DEVMETHOD(device_detach,        cpufreq_detach),

        DEVMETHOD(cpufreq_set,          cf_set_method),
        DEVMETHOD(cpufreq_get,          cf_get_method),
        DEVMETHOD(cpufreq_levels,       cf_levels_method),
        {0, 0}
};
static driver_t cpufreq_driver = {
        "cpufreq", cpufreq_methods, sizeof(struct cpufreq_softc)
};
static devclass_t cpufreq_dc;
DRIVER_MODULE(cpufreq, cpu, cpufreq_driver, cpufreq_dc, 0, 0);

static int      cf_lowest_freq;
static int      cf_verbose;
TUNABLE_INT("debug.cpufreq.lowest", &cf_lowest_freq);
TUNABLE_INT("debug.cpufreq.verbose", &cf_verbose);
SYSCTL_NODE(_debug, OID_AUTO, cpufreq, CTLFLAG_RD, NULL, "cpufreq debugging");
SYSCTL_INT(_debug_cpufreq, OID_AUTO, lowest, CTLFLAG_RW, &cf_lowest_freq, 1,
    "Don't provide levels below this frequency.");
SYSCTL_INT(_debug_cpufreq, OID_AUTO, verbose, CTLFLAG_RW, &cf_verbose, 1,
    "Print verbose debugging messages");

static int
cpufreq_attach(device_t dev)
{
        struct cpufreq_softc *sc;
        device_t parent;
        int numdevs;

        CF_DEBUG("initializing %s\n", device_get_nameunit(dev));
        sc = device_get_softc(dev);
        parent = device_get_parent(dev);
        sc->dev = dev;
        sysctl_ctx_init(&sc->sysctl_ctx);
        TAILQ_INIT(&sc->all_levels);
        CF_MTX_INIT(&sc->lock);
        sc->curr_level.total_set.freq = CPUFREQ_VAL_UNKNOWN;
        SLIST_INIT(&sc->saved_freq);
        sc->max_mhz = CPUFREQ_VAL_UNKNOWN;

        /*
         * Only initialize one set of sysctls for all CPUs.  In the future,
         * if multiple CPUs can have different settings, we can move these
         * sysctls to be under every CPU instead of just the first one.
         */
        numdevs = devclass_get_count(cpufreq_dc);
        if (numdevs > 1)
                return (0);

        CF_DEBUG("initializing one-time data for %s\n",
            device_get_nameunit(dev));
        SYSCTL_ADD_PROC(&sc->sysctl_ctx,
            SYSCTL_CHILDREN(device_get_sysctl_tree(parent)),
            OID_AUTO, "freq", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
            cpufreq_curr_sysctl, "I", "Current CPU frequency");
        SYSCTL_ADD_PROC(&sc->sysctl_ctx,
            SYSCTL_CHILDREN(device_get_sysctl_tree(parent)),
            OID_AUTO, "freq_levels", CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
            cpufreq_levels_sysctl, "A", "CPU frequency levels");
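
        /*
         * Since these handlers are attached to the parent (cpu) device's
         * sysctl tree, they normally appear as dev.cpu.0.freq (the current
         * frequency in MHz, read/write) and dev.cpu.0.freq_levels (the
         * available "freq/power" pairs, read-only), e.g.:
         *
         *      sysctl dev.cpu.0.freq_levels
         *      sysctl dev.cpu.0.freq=1200
         */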

        /*
         * Queue a one-shot broadcast that levels have changed.
         * It will run once the system has completed booting.
         */
        TASK_INIT(&sc->startup_task, 0, cpufreq_startup_task, dev);
        taskqueue_enqueue(taskqueue_thread, &sc->startup_task);

        return (0);
}

/* Handle any work to be done for all drivers that attached during boot. */
static void
cpufreq_startup_task(void *ctx, int pending)
{

        cpufreq_settings_changed((device_t)ctx);
}

static int
cpufreq_detach(device_t dev)
{
        struct cpufreq_softc *sc;
        struct cf_saved_freq *saved_freq;
        int numdevs;

        CF_DEBUG("shutdown %s\n", device_get_nameunit(dev));
        sc = device_get_softc(dev);
        sysctl_ctx_free(&sc->sysctl_ctx);

        while ((saved_freq = SLIST_FIRST(&sc->saved_freq)) != NULL) {
                SLIST_REMOVE_HEAD(&sc->saved_freq, link);
                free(saved_freq, M_TEMP);
        }

        /* Only clean up these resources when the last device is detaching. */
        numdevs = devclass_get_count(cpufreq_dc);
        if (numdevs == 1) {
                CF_DEBUG("final shutdown for %s\n", device_get_nameunit(dev));
        }

        return (0);
}

static int
cf_set_method(device_t dev, const struct cf_level *level, int priority)
{
        struct cpufreq_softc *sc;
        const struct cf_setting *set;
        struct cf_saved_freq *saved_freq, *curr_freq;
        struct pcpu *pc;
        int cpu_id, error, i;

        sc = device_get_softc(dev);
        error = 0;
        set = NULL;
        saved_freq = NULL;

        /* We are going to change levels so notify the pre-change handler. */
        EVENTHANDLER_INVOKE(cpufreq_pre_change, level, &error);
        if (error != 0) {
                EVENTHANDLER_INVOKE(cpufreq_post_change, level, error);
                return (error);
        }

        CF_MTX_LOCK(&sc->lock);

        /*
         * If the requested level has a lower priority, don't allow
         * the new level right now.
         */
        if (priority < sc->curr_priority) {
                CF_DEBUG("ignoring, curr prio %d less than %d\n", priority,
                    sc->curr_priority);
                error = EPERM;
                goto out;
        }

        /*
         * If the caller didn't specify a level and one is saved, prepare to
         * restore the saved level.  If none has been saved, return an error.
         */
        if (level == NULL) {
                saved_freq = SLIST_FIRST(&sc->saved_freq);
                if (saved_freq == NULL) {
                        CF_DEBUG("NULL level, no saved level\n");
                        error = ENXIO;
                        goto out;
                }
                level = &saved_freq->level;
                priority = saved_freq->priority;
                CF_DEBUG("restoring saved level, freq %d prio %d\n",
                    level->total_set.freq, priority);
        }

        /* Reject levels that are below our specified threshold. */
        if (level->total_set.freq < cf_lowest_freq) {
                CF_DEBUG("rejecting freq %d, less than %d limit\n",
                    level->total_set.freq, cf_lowest_freq);
                error = EINVAL;
                goto out;
        }

        /* If already at this level, just return. */
        if (CPUFREQ_CMP(sc->curr_level.total_set.freq, level->total_set.freq)) {
                CF_DEBUG("skipping freq %d, same as current level %d\n",
                    level->total_set.freq, sc->curr_level.total_set.freq);
                goto skip;
        }

        /* First, set the absolute frequency via its driver. */
        set = &level->abs_set;
        if (set->dev) {
                if (!device_is_attached(set->dev)) {
                        error = ENXIO;
                        goto out;
                }

                /* Bind to the target CPU before switching, if necessary. */
                cpu_id = PCPU_GET(cpuid);
                pc = cpu_get_pcpu(set->dev);
                if (cpu_id != pc->pc_cpuid) {
                        mtx_lock_spin(&sched_lock);
                        sched_bind(curthread, pc->pc_cpuid);
                        mtx_unlock_spin(&sched_lock);
                }
                CF_DEBUG("setting abs freq %d on %s (cpu %d)\n", set->freq,
                    device_get_nameunit(set->dev), PCPU_GET(cpuid));
                error = CPUFREQ_DRV_SET(set->dev, set);
                if (cpu_id != pc->pc_cpuid) {
                        mtx_lock_spin(&sched_lock);
                        sched_unbind(curthread);
                        mtx_unlock_spin(&sched_lock);
                }
                if (error) {
                        goto out;
                }
        }

        /* Next, set any/all relative frequencies via their drivers. */
        for (i = 0; i < level->rel_count; i++) {
                set = &level->rel_set[i];
                if (!device_is_attached(set->dev)) {
                        error = ENXIO;
                        goto out;
                }

                /* Bind to the target CPU before switching, if necessary. */
                cpu_id = PCPU_GET(cpuid);
                pc = cpu_get_pcpu(set->dev);
                if (cpu_id != pc->pc_cpuid) {
                        mtx_lock_spin(&sched_lock);
                        sched_bind(curthread, pc->pc_cpuid);
                        mtx_unlock_spin(&sched_lock);
                }
                CF_DEBUG("setting rel freq %d on %s (cpu %d)\n", set->freq,
                    device_get_nameunit(set->dev), PCPU_GET(cpuid));
                error = CPUFREQ_DRV_SET(set->dev, set);
                if (cpu_id != pc->pc_cpuid) {
                        mtx_lock_spin(&sched_lock);
                        sched_unbind(curthread);
                        mtx_unlock_spin(&sched_lock);
                }
                if (error) {
                        /* XXX Back out any successful setting? */
                        goto out;
                }
        }

skip:
        /*
         * Before recording the current level, check if we're going to a
         * higher priority.  If so, save the previous level and priority.
         */
        if (sc->curr_level.total_set.freq != CPUFREQ_VAL_UNKNOWN &&
            priority > sc->curr_priority) {
                CF_DEBUG("saving level, freq %d prio %d\n",
                    sc->curr_level.total_set.freq, sc->curr_priority);
                curr_freq = malloc(sizeof(*curr_freq), M_TEMP, M_NOWAIT);
                if (curr_freq == NULL) {
                        error = ENOMEM;
                        goto out;
                }
                curr_freq->level = sc->curr_level;
                curr_freq->priority = sc->curr_priority;
                SLIST_INSERT_HEAD(&sc->saved_freq, curr_freq, link);
        }
        sc->curr_level = *level;
        sc->curr_priority = priority;

        /* If we were restoring a saved state, reset it to "unused". */
        if (saved_freq != NULL) {
                CF_DEBUG("resetting saved level\n");
                sc->curr_level.total_set.freq = CPUFREQ_VAL_UNKNOWN;
                SLIST_REMOVE_HEAD(&sc->saved_freq, link);
                free(saved_freq, M_TEMP);
        }

out:
        CF_MTX_UNLOCK(&sc->lock);

        /*
         * We changed levels (or attempted to) so notify the post-change
         * handler of new frequency or error.
         */
        EVENTHANDLER_INVOKE(cpufreq_post_change, level, error);
        if (error && set)
                device_printf(set->dev, "set freq failed, err %d\n", error);

        return (error);
}

static int
cf_get_method(device_t dev, struct cf_level *level)
{
        struct cpufreq_softc *sc;
        struct cf_level *levels;
        struct cf_setting *curr_set, set;
        struct pcpu *pc;
        device_t *devs;
        int count, error, i, n, numdevs;
        uint64_t rate;

        sc = device_get_softc(dev);
        error = 0;
        levels = NULL;

        /* If we already know the current frequency, we're done. */
        CF_MTX_LOCK(&sc->lock);
        curr_set = &sc->curr_level.total_set;
        if (curr_set->freq != CPUFREQ_VAL_UNKNOWN) {
                CF_DEBUG("get returning known freq %d\n", curr_set->freq);
                goto out;
        }
        CF_MTX_UNLOCK(&sc->lock);

        /*
         * We need to figure out the current level.  Loop through every
         * driver, getting the current setting.  Then, attempt to get a best
         * match of settings against each level.
         */
        count = CF_MAX_LEVELS;
        levels = malloc(count * sizeof(*levels), M_TEMP, M_NOWAIT);
        if (levels == NULL)
                return (ENOMEM);
        error = CPUFREQ_LEVELS(sc->dev, levels, &count);
        if (error) {
                if (error == E2BIG)
                        printf("cpufreq: need to increase CF_MAX_LEVELS\n");
                free(levels, M_TEMP);
                return (error);
        }
        error = device_get_children(device_get_parent(dev), &devs, &numdevs);
        if (error) {
                free(levels, M_TEMP);
                return (error);
        }

        /*
         * Reacquire the lock and search for the given level.
         *
         * XXX Note: this is not quite right since we really need to go
         * through each level and compare both absolute and relative
         * settings for each driver in the system before making a match.
         * The estimation code below catches this case though.
         */
        CF_MTX_LOCK(&sc->lock);
        for (n = 0; n < numdevs && curr_set->freq == CPUFREQ_VAL_UNKNOWN; n++) {
                if (!device_is_attached(devs[n]))
                        continue;
                error = CPUFREQ_DRV_GET(devs[n], &set);
                if (error)
                        continue;
                for (i = 0; i < count; i++) {
                        if (CPUFREQ_CMP(set.freq, levels[i].total_set.freq)) {
                                sc->curr_level = levels[i];
                                break;
                        }
                }
        }
        free(devs, M_TEMP);
        if (curr_set->freq != CPUFREQ_VAL_UNKNOWN) {
                CF_DEBUG("get matched freq %d from drivers\n", curr_set->freq);
                goto out;
        }

        /*
         * We couldn't find an exact match, so attempt to estimate and then
         * match against a level.
         */
        pc = cpu_get_pcpu(dev);
        if (pc == NULL) {
                error = ENXIO;
                goto out;
        }
        cpu_est_clockrate(pc->pc_cpuid, &rate);
        rate /= 1000000;
        for (i = 0; i < count; i++) {
                if (CPUFREQ_CMP(rate, levels[i].total_set.freq)) {
                        sc->curr_level = levels[i];
                        CF_DEBUG("get estimated freq %d\n", curr_set->freq);
                        break;
                }
        }

out:
        if (error == 0)
                *level = sc->curr_level;

        CF_MTX_UNLOCK(&sc->lock);
        if (levels)
                free(levels, M_TEMP);
        return (error);
}

static int
cf_levels_method(device_t dev, struct cf_level *levels, int *count)
{
        struct cf_setting_array *set_arr;
        struct cf_setting_lst rel_sets;
        struct cpufreq_softc *sc;
        struct cf_level *lev;
        struct cf_setting *sets;
        struct pcpu *pc;
        device_t *devs;
        int error, i, numdevs, set_count, type;
        uint64_t rate;

        if (levels == NULL || count == NULL)
                return (EINVAL);

        TAILQ_INIT(&rel_sets);
        sc = device_get_softc(dev);
        error = device_get_children(device_get_parent(dev), &devs, &numdevs);
        if (error)
                return (error);
        sets = malloc(MAX_SETTINGS * sizeof(*sets), M_TEMP, M_NOWAIT);
        if (sets == NULL) {
                free(devs, M_TEMP);
                return (ENOMEM);
        }

        /* Get settings from all cpufreq drivers. */
        CF_MTX_LOCK(&sc->lock);
        for (i = 0; i < numdevs; i++) {
                /* Skip devices that aren't ready. */
                if (!device_is_attached(devs[i]))
                        continue;

                /*
                 * Get settings, skipping drivers that offer no settings or
                 * provide settings for informational purposes only.
                 */
                error = CPUFREQ_DRV_TYPE(devs[i], &type);
                if (error || (type & CPUFREQ_FLAG_INFO_ONLY)) {
                        if (error == 0) {
                                CF_DEBUG("skipping info-only driver %s\n",
                                    device_get_nameunit(devs[i]));
                        }
                        continue;
                }
                set_count = MAX_SETTINGS;
                error = CPUFREQ_DRV_SETTINGS(devs[i], sets, &set_count);
                if (error || set_count == 0)
                        continue;

                /* Add the settings to our absolute/relative lists. */
                switch (type & CPUFREQ_TYPE_MASK) {
                case CPUFREQ_TYPE_ABSOLUTE:
                        error = cpufreq_insert_abs(sc, sets, set_count);
                        break;
                case CPUFREQ_TYPE_RELATIVE:
                        CF_DEBUG("adding %d relative settings\n", set_count);
                        set_arr = malloc(sizeof(*set_arr), M_TEMP, M_NOWAIT);
                        if (set_arr == NULL) {
                                error = ENOMEM;
                                goto out;
                        }
                        bcopy(sets, set_arr->sets, set_count * sizeof(*sets));
                        set_arr->count = set_count;
                        TAILQ_INSERT_TAIL(&rel_sets, set_arr, link);
                        break;
                default:
                        error = EINVAL;
                }
                if (error)
                        goto out;
        }

        /*
         * If there are no absolute levels, create a fake one at 100%.  We
         * then cache the clockrate for later use as our base frequency.
         *
         * XXX This assumes that the first time through, if we only have
         * relative drivers, the CPU is currently running at 100%.
         */
        if (TAILQ_EMPTY(&sc->all_levels)) {
                if (sc->max_mhz == CPUFREQ_VAL_UNKNOWN) {
                        pc = cpu_get_pcpu(dev);
                        cpu_est_clockrate(pc->pc_cpuid, &rate);
                        sc->max_mhz = rate / 1000000;
                }
                memset(&sets[0], CPUFREQ_VAL_UNKNOWN, sizeof(*sets));
                sets[0].freq = sc->max_mhz;
                sets[0].dev = NULL;
                error = cpufreq_insert_abs(sc, sets, 1);
                if (error)
                        goto out;
        }

        /* Create a combined list of absolute + relative levels. */
        TAILQ_FOREACH(set_arr, &rel_sets, link)
                cpufreq_expand_set(sc, set_arr);

        /* If the caller doesn't have enough space, return the actual count. */
        if (sc->all_count > *count) {
                *count = sc->all_count;
                error = E2BIG;
                goto out;
        }

        /* Finally, output the list of levels. */
        i = 0;
        TAILQ_FOREACH(lev, &sc->all_levels, link) {
                /* Skip levels that have a frequency that is too low. */
                if (lev->total_set.freq < cf_lowest_freq) {
                        sc->all_count--;
                        continue;
                }

                levels[i] = *lev;
                i++;
        }
        *count = sc->all_count;
        error = 0;

out:
        /* Clear all levels since we regenerate them each time. */
        while ((lev = TAILQ_FIRST(&sc->all_levels)) != NULL) {
                TAILQ_REMOVE(&sc->all_levels, lev, link);
                free(lev, M_TEMP);
        }
        sc->all_count = 0;

        CF_MTX_UNLOCK(&sc->lock);
        while ((set_arr = TAILQ_FIRST(&rel_sets)) != NULL) {
                TAILQ_REMOVE(&rel_sets, set_arr, link);
                free(set_arr, M_TEMP);
        }
        free(devs, M_TEMP);
        free(sets, M_TEMP);
        return (error);
}

/*
 * Create levels for an array of absolute settings and insert them in
 * sorted order in the specified list.
 */
static int
cpufreq_insert_abs(struct cpufreq_softc *sc, struct cf_setting *sets,
    int count)
{
        struct cf_level_lst *list;
        struct cf_level *level, *search;
        int i;

        CF_MTX_ASSERT(&sc->lock);

        list = &sc->all_levels;
        for (i = 0; i < count; i++) {
                level = malloc(sizeof(*level), M_TEMP, M_NOWAIT | M_ZERO);
                if (level == NULL)
                        return (ENOMEM);
                level->abs_set = sets[i];
                level->total_set = sets[i];
                level->total_set.dev = NULL;
                sc->all_count++;

                if (TAILQ_EMPTY(list)) {
                        CF_DEBUG("adding abs setting %d at head\n",
                            sets[i].freq);
                        TAILQ_INSERT_HEAD(list, level, link);
                        continue;
                }

                TAILQ_FOREACH_REVERSE(search, list, cf_level_lst, link) {
                        if (sets[i].freq <= search->total_set.freq) {
                                CF_DEBUG("adding abs setting %d after %d\n",
                                    sets[i].freq, search->total_set.freq);
                                TAILQ_INSERT_AFTER(list, search, level, link);
                                break;
                        }
                }
        }
        return (0);
}

/*
 * Expand a group of relative settings, creating derived levels from them.
 */
static int
cpufreq_expand_set(struct cpufreq_softc *sc, struct cf_setting_array *set_arr)
{
        struct cf_level *fill, *search;
        struct cf_setting *set;
        int i;

        CF_MTX_ASSERT(&sc->lock);

        /*
         * Walk the set of all existing levels in reverse.  This is so we
         * create derived states from the lowest absolute settings first
         * and discard duplicates created from higher absolute settings.
         * For instance, a level of 50 MHz derived from 100 MHz + 50% is
         * preferable to 200 MHz + 25% because absolute settings are more
         * efficient since they often change the voltage as well.
         */
        TAILQ_FOREACH_REVERSE(search, &sc->all_levels, cf_level_lst, link) {
                /* Add each setting to the level, duplicating if necessary. */
                for (i = 0; i < set_arr->count; i++) {
                        set = &set_arr->sets[i];

                        /*
                         * If this setting is less than 100%, split the level
                         * into two and add this setting to the new level.
                         */
                        fill = search;
                        if (set->freq < 10000) {
                                fill = cpufreq_dup_set(sc, search, set);

                                /*
                                 * The new level was a duplicate of an existing
                                 * level or its absolute setting is too high,
                                 * so we freed it.  For example, we discard a
                                 * derived level of 1000 MHz/25% if a level
                                 * of 500 MHz/100% already exists.
                                 */
                                if (fill == NULL)
                                        break;
                        }

                        /* Add this setting to the existing or new level. */
                        KASSERT(fill->rel_count < MAX_SETTINGS,
                            ("cpufreq: too many relative drivers (%d)",
                            MAX_SETTINGS));
                        fill->rel_set[fill->rel_count] = *set;
                        fill->rel_count++;
                        CF_DEBUG(
                        "expand set added rel setting %d%% to %d level\n",
                            set->freq / 100, fill->total_set.freq);
                }
        }

        return (0);
}
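
/*
 * Note on units: a relative setting expresses its frequency as a fraction
 * of the level it modifies, in hundredths of a percent (10000 == 100%).
 * This is why cpufreq_expand_set() only splits a level for settings below
 * 10000 and why the frequency/power scaling in cpufreq_dup_set() divides
 * by 10000 (e.g., a 50% setting is stored as 5000 and turns a 100 MHz
 * level into a 50 MHz derived level).
 */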

static struct cf_level *
cpufreq_dup_set(struct cpufreq_softc *sc, struct cf_level *dup,
    struct cf_setting *set)
{
        struct cf_level_lst *list;
        struct cf_level *fill, *itr;
        struct cf_setting *fill_set, *itr_set;
        int i;

        CF_MTX_ASSERT(&sc->lock);

        /*
         * Create a new level, copy it from the old one, and update the
         * total frequency and power by the percentage specified in the
         * relative setting.
         */
        fill = malloc(sizeof(*fill), M_TEMP, M_NOWAIT);
        if (fill == NULL)
                return (NULL);
        *fill = *dup;
        fill_set = &fill->total_set;
        fill_set->freq = ((uint64_t)fill_set->freq * set->freq) / 10000;
        if (fill_set->power != CPUFREQ_VAL_UNKNOWN) {
                fill_set->power = ((uint64_t)fill_set->power * set->freq)
                    / 10000;
        }
        if (set->lat != CPUFREQ_VAL_UNKNOWN) {
                if (fill_set->lat != CPUFREQ_VAL_UNKNOWN)
                        fill_set->lat += set->lat;
                else
                        fill_set->lat = set->lat;
        }
        CF_DEBUG("dup set considering derived setting %d\n", fill_set->freq);

        /*
         * If we copied an old level that we already modified (say, at 100%),
         * we need to remove that setting before adding this one.  Since we
         * process each setting array in order, we know any settings for this
         * driver will be found at the end.
         */
        for (i = fill->rel_count; i != 0; i--) {
                if (fill->rel_set[i - 1].dev != set->dev)
                        break;
                CF_DEBUG("removed last relative driver: %s\n",
                    device_get_nameunit(set->dev));
                fill->rel_count--;
        }

        /*
         * Insert the new level in sorted order.  If it is a duplicate of an
         * existing level (1) or has an absolute setting higher than the
         * existing level (2), do not add it.  We can do this since any such
         * level is guaranteed to use less power.  For example (1), a level
         * with one absolute setting of 800 MHz uses less power than one
         * composed of an absolute setting of 1600 MHz and a relative setting
         * at 50%.  Also for example (2), a level of 800 MHz/75% is preferable
         * to 1600 MHz/25% even though the latter has a lower total frequency.
         */
        list = &sc->all_levels;
        KASSERT(!TAILQ_EMPTY(list), ("all levels list empty in dup set"));
        TAILQ_FOREACH_REVERSE(itr, list, cf_level_lst, link) {
                itr_set = &itr->total_set;
                if (CPUFREQ_CMP(fill_set->freq, itr_set->freq)) {
                        CF_DEBUG("dup set rejecting %d (dupe)\n",
                            fill_set->freq);
                        itr = NULL;
                        break;
                } else if (fill_set->freq < itr_set->freq) {
                        if (fill->abs_set.freq <= itr->abs_set.freq) {
                                CF_DEBUG(
                        "dup done, inserting new level %d after %d\n",
                                    fill_set->freq, itr_set->freq);
                                TAILQ_INSERT_AFTER(list, itr, fill, link);
                                sc->all_count++;
                        } else {
                                CF_DEBUG("dup set rejecting %d (abs too big)\n",
                                    fill_set->freq);
                                itr = NULL;
                        }
                        break;
                }
        }

        /* We didn't find a good place for this new level so free it. */
        if (itr == NULL) {
                CF_DEBUG("dup set freeing new level %d (not optimal)\n",
                    fill_set->freq);
                free(fill, M_TEMP);
                fill = NULL;
        }

        return (fill);
}

static int
cpufreq_curr_sysctl(SYSCTL_HANDLER_ARGS)
{
        struct cpufreq_softc *sc;
        struct cf_level *levels;
        int count, devcount, error, freq, i, n;
        device_t *devs;

        devs = NULL;
        sc = oidp->oid_arg1;
        levels = malloc(CF_MAX_LEVELS * sizeof(*levels), M_TEMP, M_NOWAIT);
        if (levels == NULL)
                return (ENOMEM);

        error = CPUFREQ_GET(sc->dev, &levels[0]);
        if (error)
                goto out;
        freq = levels[0].total_set.freq;
        error = sysctl_handle_int(oidp, &freq, 0, req);
        if (error != 0 || req->newptr == NULL)
                goto out;

        /*
         * While we only call cpufreq_get() on one device (assuming all
         * CPUs have equal levels), we call cpufreq_set() on all CPUs.
         * This is needed for some MP systems.
         */
        error = devclass_get_devices(cpufreq_dc, &devs, &devcount);
        if (error)
                goto out;
        for (n = 0; n < devcount; n++) {
                count = CF_MAX_LEVELS;
                error = CPUFREQ_LEVELS(devs[n], levels, &count);
                if (error) {
                        if (error == E2BIG)
                                printf(
                        "cpufreq: need to increase CF_MAX_LEVELS\n");
                        break;
                }
                for (i = 0; i < count; i++) {
                        if (CPUFREQ_CMP(levels[i].total_set.freq, freq)) {
                                error = CPUFREQ_SET(devs[n], &levels[i],
                                    CPUFREQ_PRIO_USER);
                                break;
                        }
                }
                if (i == count) {
                        error = EINVAL;
                        break;
                }
        }

out:
        if (devs)
                free(devs, M_TEMP);
        if (levels)
                free(levels, M_TEMP);
        return (error);
}

static int
cpufreq_levels_sysctl(SYSCTL_HANDLER_ARGS)
{
        struct cpufreq_softc *sc;
        struct cf_level *levels;
        struct cf_setting *set;
        struct sbuf sb;
        int count, error, i;

        sc = oidp->oid_arg1;
        sbuf_new(&sb, NULL, 128, SBUF_AUTOEXTEND);

        /* Get settings from the device and generate the output string. */
        count = CF_MAX_LEVELS;
        levels = malloc(count * sizeof(*levels), M_TEMP, M_NOWAIT);
        if (levels == NULL) {
                sbuf_delete(&sb);
                return (ENOMEM);
        }
        error = CPUFREQ_LEVELS(sc->dev, levels, &count);
        if (error) {
                if (error == E2BIG)
                        printf("cpufreq: need to increase CF_MAX_LEVELS\n");
                goto out;
        }
        if (count) {
                for (i = 0; i < count; i++) {
                        set = &levels[i].total_set;
                        sbuf_printf(&sb, "%d/%d ", set->freq, set->power);
                }
        } else
                sbuf_cpy(&sb, "0");
        sbuf_trim(&sb);
        sbuf_finish(&sb);
        error = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);

out:
        free(levels, M_TEMP);
        sbuf_delete(&sb);
        return (error);
}

static int
cpufreq_settings_sysctl(SYSCTL_HANDLER_ARGS)
{
        device_t dev;
        struct cf_setting *sets;
        struct sbuf sb;
        int error, i, set_count;

        dev = oidp->oid_arg1;
        sbuf_new(&sb, NULL, 128, SBUF_AUTOEXTEND);

        /* Get settings from the device and generate the output string. */
        set_count = MAX_SETTINGS;
        sets = malloc(set_count * sizeof(*sets), M_TEMP, M_NOWAIT);
        if (sets == NULL) {
                sbuf_delete(&sb);
                return (ENOMEM);
        }
        error = CPUFREQ_DRV_SETTINGS(dev, sets, &set_count);
        if (error)
                goto out;
        if (set_count) {
                for (i = 0; i < set_count; i++)
                        sbuf_printf(&sb, "%d/%d ", sets[i].freq, sets[i].power);
        } else
                sbuf_cpy(&sb, "0");
        sbuf_trim(&sb);
        sbuf_finish(&sb);
        error = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);

out:
        free(sets, M_TEMP);
        sbuf_delete(&sb);
        return (error);
}
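
/*
 * Driver-facing entry points.  A hardware-specific cpufreq driver (one
 * that implements CPUFREQ_DRV_SET/GET/SETTINGS/TYPE) typically calls
 * cpufreq_register() from its attach routine, cpufreq_unregister() from
 * detach, and cpufreq_settings_changed() whenever its set of available
 * settings changes so that listeners on the cpufreq_levels_changed event
 * can requery the levels.
 */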

int
cpufreq_register(device_t dev)
{
        struct cpufreq_softc *sc;
        device_t cf_dev, cpu_dev;

        /* Add a sysctl to get each driver's settings separately. */
        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
            SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
            OID_AUTO, "freq_settings", CTLTYPE_STRING | CTLFLAG_RD, dev, 0,
            cpufreq_settings_sysctl, "A", "CPU frequency driver settings");

        /*
         * Add only one cpufreq device to each CPU.  Currently, all CPUs
         * must offer the same levels and be switched at the same time.
         */
        cpu_dev = device_get_parent(dev);
        if ((cf_dev = device_find_child(cpu_dev, "cpufreq", -1))) {
                sc = device_get_softc(cf_dev);
                sc->max_mhz = CPUFREQ_VAL_UNKNOWN;
                return (0);
        }

        /* Add the child device and possibly sysctls. */
        cf_dev = BUS_ADD_CHILD(cpu_dev, 0, "cpufreq", -1);
        if (cf_dev == NULL)
                return (ENOMEM);
        device_quiet(cf_dev);

        return (device_probe_and_attach(cf_dev));
}

int
cpufreq_unregister(device_t dev)
{
        device_t cf_dev, *devs;
        int cfcount, devcount, error, i, type;

        /*
         * If this is the last cpufreq child device, remove the control
         * device as well.  We identify cpufreq children by calling a method
         * they support.
         */
        error = device_get_children(device_get_parent(dev), &devs, &devcount);
        if (error)
                return (error);
        cf_dev = device_find_child(device_get_parent(dev), "cpufreq", -1);
        if (cf_dev == NULL) {
                device_printf(dev,
        "warning: cpufreq_unregister called with no cpufreq device active\n");
                free(devs, M_TEMP);
                return (0);
        }
        cfcount = 0;
        for (i = 0; i < devcount; i++) {
                if (!device_is_attached(devs[i]))
                        continue;
                if (CPUFREQ_DRV_TYPE(devs[i], &type) == 0)
                        cfcount++;
        }
        if (cfcount <= 1)
                device_delete_child(device_get_parent(cf_dev), cf_dev);
        free(devs, M_TEMP);

        return (0);
}

int
cpufreq_settings_changed(device_t dev)
{

        EVENTHANDLER_INVOKE(cpufreq_levels_changed,
            device_get_unit(device_get_parent(dev)));
        return (0);
}