/*-
 * Copyright (c) 2004-2007 Nate Lawson (SDG)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/sx.h>
#include <sys/timetc.h>
#include <sys/taskqueue.h>

#include "cpufreq_if.h"

/*
 * Common CPU frequency glue code. Drivers for specific hardware can
 * attach this interface to allow users to get/set the CPU frequency.
 */

/*
 * Number of levels we can handle. Levels are synthesized from settings
 * so for M settings and N drivers, there may be M*N levels.
 */
#define CF_MAX_LEVELS	64

struct cf_saved_freq {
	struct cf_level			level;
	int				priority;
	SLIST_ENTRY(cf_saved_freq)	link;
};

struct cpufreq_softc {
	struct sx			lock;
	struct cf_level			curr_level;
	int				curr_priority;
	SLIST_HEAD(, cf_saved_freq)	saved_freq;
	struct cf_level_lst		all_levels;
	int				all_count;
	int				max_mhz;
	device_t			dev;
	struct sysctl_ctx_list		sysctl_ctx;
	struct task			startup_task;
};

struct cf_setting_array {
	struct cf_setting		sets[MAX_SETTINGS];
	int				count;
	TAILQ_ENTRY(cf_setting_array)	link;
};

TAILQ_HEAD(cf_setting_lst, cf_setting_array);

#define CF_MTX_INIT(x)		sx_init((x), "cpufreq lock")
#define CF_MTX_LOCK(x)		sx_xlock((x))
#define CF_MTX_UNLOCK(x)	sx_xunlock((x))
#define CF_MTX_ASSERT(x)	sx_assert((x), SX_XLOCKED)

#define CF_DEBUG(msg...)	do {		\
	if (cf_verbose)				\
		printf("cpufreq: " msg);	\
} while (0)

static int	cpufreq_attach(device_t dev);
static void	cpufreq_startup_task(void *ctx, int pending);
static int	cpufreq_detach(device_t dev);
static int	cf_set_method(device_t dev, const struct cf_level *level,
		    int priority);
static int	cf_get_method(device_t dev, struct cf_level *level);
static int	cf_levels_method(device_t dev, struct cf_level *levels,
		    int *count);
static int	cpufreq_insert_abs(struct cpufreq_softc *sc,
		    struct cf_setting *sets, int count);
static int	cpufreq_expand_set(struct cpufreq_softc *sc,
		    struct cf_setting_array *set_arr);
static struct cf_level *cpufreq_dup_set(struct cpufreq_softc *sc,
		    struct cf_level *dup, struct cf_setting *set);
static int	cpufreq_curr_sysctl(SYSCTL_HANDLER_ARGS);
static int	cpufreq_levels_sysctl(SYSCTL_HANDLER_ARGS);
static int	cpufreq_settings_sysctl(SYSCTL_HANDLER_ARGS);

static device_method_t cpufreq_methods[] = {
	DEVMETHOD(device_probe,		bus_generic_probe),
	DEVMETHOD(device_attach,	cpufreq_attach),
	DEVMETHOD(device_detach,	cpufreq_detach),

	DEVMETHOD(cpufreq_set,		cf_set_method),
	DEVMETHOD(cpufreq_get,		cf_get_method),
	DEVMETHOD(cpufreq_levels,	cf_levels_method),
	{0, 0}
};
static driver_t cpufreq_driver = {
	"cpufreq", cpufreq_methods, sizeof(struct cpufreq_softc)
};
static devclass_t cpufreq_dc;
DRIVER_MODULE(cpufreq, cpu, cpufreq_driver, cpufreq_dc, 0, 0);

static int	cf_lowest_freq;
static int	cf_verbose;
TUNABLE_INT("debug.cpufreq.lowest", &cf_lowest_freq);
TUNABLE_INT("debug.cpufreq.verbose", &cf_verbose);
SYSCTL_NODE(_debug, OID_AUTO, cpufreq, CTLFLAG_RD, NULL, "cpufreq debugging");
SYSCTL_INT(_debug_cpufreq, OID_AUTO, lowest, CTLFLAG_RW, &cf_lowest_freq, 1,
    "Don't provide levels below this frequency.");
SYSCTL_INT(_debug_cpufreq, OID_AUTO, verbose, CTLFLAG_RW, &cf_verbose, 1,
    "Print verbose debugging messages");

static int
cpufreq_attach(device_t dev)
{
	struct cpufreq_softc *sc;
	device_t parent;
	int numdevs;

	CF_DEBUG("initializing %s\n", device_get_nameunit(dev));
	sc = device_get_softc(dev);
	parent = device_get_parent(dev);
	sc->dev = dev;
	sysctl_ctx_init(&sc->sysctl_ctx);
	TAILQ_INIT(&sc->all_levels);
	CF_MTX_INIT(&sc->lock);
	sc->curr_level.total_set.freq = CPUFREQ_VAL_UNKNOWN;
	SLIST_INIT(&sc->saved_freq);
	sc->max_mhz = CPUFREQ_VAL_UNKNOWN;

	/*
	 * Only initialize one set of sysctls for all CPUs. In the future,
	 * if multiple CPUs can have different settings, we can move these
	 * sysctls to be under every CPU instead of just the first one.
	 */
	numdevs = devclass_get_count(cpufreq_dc);
	if (numdevs > 1)
		return (0);

	CF_DEBUG("initializing one-time data for %s\n",
	    device_get_nameunit(dev));
	SYSCTL_ADD_PROC(&sc->sysctl_ctx,
	    SYSCTL_CHILDREN(device_get_sysctl_tree(parent)),
	    OID_AUTO, "freq", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
	    cpufreq_curr_sysctl, "I", "Current CPU frequency");
	SYSCTL_ADD_PROC(&sc->sysctl_ctx,
	    SYSCTL_CHILDREN(device_get_sysctl_tree(parent)),
	    OID_AUTO, "freq_levels", CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
	    cpufreq_levels_sysctl, "A", "CPU frequency levels");
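
	/*
	 * Note: since these OIDs hang off the parent cpu device's sysctl
	 * tree, they normally appear as dev.cpu.0.freq and
	 * dev.cpu.0.freq_levels (the unit number depends on which cpu
	 * device gets the first cpufreq child).
	 */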

	/*
	 * Queue a one-shot broadcast that levels have changed.
	 * It will run once the system has completed booting.
	 */
	TASK_INIT(&sc->startup_task, 0, cpufreq_startup_task, dev);
	taskqueue_enqueue(taskqueue_thread, &sc->startup_task);

	return (0);
}

/* Handle any work to be done for all drivers that attached during boot. */
static void
cpufreq_startup_task(void *ctx, int pending)
{

	cpufreq_settings_changed((device_t)ctx);
}

static int
cpufreq_detach(device_t dev)
{
	struct cpufreq_softc *sc;
	struct cf_saved_freq *saved_freq;
	int numdevs;

	CF_DEBUG("shutdown %s\n", device_get_nameunit(dev));
	sc = device_get_softc(dev);
	sysctl_ctx_free(&sc->sysctl_ctx);

	while ((saved_freq = SLIST_FIRST(&sc->saved_freq)) != NULL) {
		SLIST_REMOVE_HEAD(&sc->saved_freq, link);
		free(saved_freq, M_TEMP);
	}

	/* Only clean up these resources when the last device is detaching. */
	numdevs = devclass_get_count(cpufreq_dc);
	if (numdevs == 1) {
		CF_DEBUG("final shutdown for %s\n", device_get_nameunit(dev));
	}

	return (0);
}
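
/*
 * Note on priorities (a summary of the logic below): cf_set_method() keeps
 * a small stack of saved levels. When a level is set at a higher priority
 * than the current one, the current level/priority pair is pushed onto
 * sc->saved_freq; a later call with a NULL level pops and restores the most
 * recently saved level. For example, a higher-priority kernel caller can
 * temporarily override a level chosen at CPUFREQ_PRIO_USER and later hand
 * control back by passing a NULL level.
 */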

static int
cf_set_method(device_t dev, const struct cf_level *level, int priority)
{
	struct cpufreq_softc *sc;
	const struct cf_setting *set;
	struct cf_saved_freq *saved_freq, *curr_freq;
	struct pcpu *pc;
	int error, i;

	sc = device_get_softc(dev);
	error = 0;
	set = NULL;
	saved_freq = NULL;

	/* We are going to change levels so notify the pre-change handler. */
	EVENTHANDLER_INVOKE(cpufreq_pre_change, level, &error);
	if (error != 0) {
		EVENTHANDLER_INVOKE(cpufreq_post_change, level, error);
		return (error);
	}

	CF_MTX_LOCK(&sc->lock);

#ifdef SMP
	/*
	 * If still booting and secondary CPUs not started yet, don't allow
	 * changing the frequency until they're online. This is because we
	 * can't switch to them using sched_bind() and thus we'd only be
	 * switching the main CPU. XXXTODO: Need to think more about how to
	 * handle having different CPUs at different frequencies.
	 */
	if (mp_ncpus > 1 && !smp_active) {
		device_printf(dev, "rejecting change, SMP not started yet\n");
		error = ENXIO;
		goto out;
	}
#endif /* SMP */

	/*
	 * If the requested level has a lower priority, don't allow
	 * the new level right now.
	 */
	if (priority < sc->curr_priority) {
		CF_DEBUG("ignoring, new prio %d less than curr prio %d\n",
		    priority, sc->curr_priority);
		error = EPERM;
		goto out;
	}

	/*
	 * If the caller didn't specify a level and one is saved, prepare to
	 * restore the saved level. If none has been saved, return an error.
	 */
	if (level == NULL) {
		saved_freq = SLIST_FIRST(&sc->saved_freq);
		if (saved_freq == NULL) {
			CF_DEBUG("NULL level, no saved level\n");
			error = ENXIO;
			goto out;
		}
		level = &saved_freq->level;
		priority = saved_freq->priority;
		CF_DEBUG("restoring saved level, freq %d prio %d\n",
		    level->total_set.freq, priority);
	}

	/* Reject levels that are below our specified threshold. */
	if (level->total_set.freq < cf_lowest_freq) {
		CF_DEBUG("rejecting freq %d, less than %d limit\n",
		    level->total_set.freq, cf_lowest_freq);
		error = EINVAL;
		goto out;
	}

	/* If already at this level, just return. */
	if (CPUFREQ_CMP(sc->curr_level.total_set.freq, level->total_set.freq)) {
		CF_DEBUG("skipping freq %d, same as current level %d\n",
		    level->total_set.freq, sc->curr_level.total_set.freq);
		goto skip;
	}

	/* First, set the absolute frequency via its driver. */
	set = &level->abs_set;
	if (set->dev) {
		if (!device_is_attached(set->dev)) {
			error = ENXIO;
			goto out;
		}

		/* Bind to the target CPU before switching. */
		pc = cpu_get_pcpu(set->dev);
		thread_lock(curthread);
		sched_bind(curthread, pc->pc_cpuid);
		thread_unlock(curthread);
		CF_DEBUG("setting abs freq %d on %s (cpu %d)\n", set->freq,
		    device_get_nameunit(set->dev), PCPU_GET(cpuid));
		error = CPUFREQ_DRV_SET(set->dev, set);
		thread_lock(curthread);
		sched_unbind(curthread);
		thread_unlock(curthread);
		if (error) {
			goto out;
		}
	}

	/* Next, set any/all relative frequencies via their drivers. */
	for (i = 0; i < level->rel_count; i++) {
		set = &level->rel_set[i];
		if (!device_is_attached(set->dev)) {
			error = ENXIO;
			goto out;
		}

		/* Bind to the target CPU before switching. */
		pc = cpu_get_pcpu(set->dev);
		thread_lock(curthread);
		sched_bind(curthread, pc->pc_cpuid);
		thread_unlock(curthread);
		CF_DEBUG("setting rel freq %d on %s (cpu %d)\n", set->freq,
		    device_get_nameunit(set->dev), PCPU_GET(cpuid));
		error = CPUFREQ_DRV_SET(set->dev, set);
		thread_lock(curthread);
		sched_unbind(curthread);
		thread_unlock(curthread);
		if (error) {
			/* XXX Back out any successful setting? */
			goto out;
		}
	}

skip:
	/*
	 * Before recording the current level, check if we're going to a
	 * higher priority. If so, save the previous level and priority.
	 */
	if (sc->curr_level.total_set.freq != CPUFREQ_VAL_UNKNOWN &&
	    priority > sc->curr_priority) {
		CF_DEBUG("saving level, freq %d prio %d\n",
		    sc->curr_level.total_set.freq, sc->curr_priority);
		curr_freq = malloc(sizeof(*curr_freq), M_TEMP, M_NOWAIT);
		if (curr_freq == NULL) {
			error = ENOMEM;
			goto out;
		}
		curr_freq->level = sc->curr_level;
		curr_freq->priority = sc->curr_priority;
		SLIST_INSERT_HEAD(&sc->saved_freq, curr_freq, link);
	}
	sc->curr_level = *level;
	sc->curr_priority = priority;

	/* If we were restoring a saved state, reset it to "unused". */
	if (saved_freq != NULL) {
		CF_DEBUG("resetting saved level\n");
		sc->curr_level.total_set.freq = CPUFREQ_VAL_UNKNOWN;
		SLIST_REMOVE_HEAD(&sc->saved_freq, link);
		free(saved_freq, M_TEMP);
	}

out:
	CF_MTX_UNLOCK(&sc->lock);

	/*
	 * We changed levels (or attempted to) so notify the post-change
	 * handler of new frequency or error.
	 */
	EVENTHANDLER_INVOKE(cpufreq_post_change, level, error);
	if (error && set)
		device_printf(set->dev, "set freq failed, err %d\n", error);

	return (error);
}

static int
cf_get_method(device_t dev, struct cf_level *level)
{
	struct cpufreq_softc *sc;
	struct cf_level *levels;
	struct cf_setting *curr_set, set;
	struct pcpu *pc;
	device_t *devs;
	int count, error, i, n, numdevs;
	uint64_t rate;

	sc = device_get_softc(dev);
	error = 0;
	levels = NULL;

	/* If we already know the current frequency, we're done. */
	CF_MTX_LOCK(&sc->lock);
	curr_set = &sc->curr_level.total_set;
	if (curr_set->freq != CPUFREQ_VAL_UNKNOWN) {
		CF_DEBUG("get returning known freq %d\n", curr_set->freq);
		goto out;
	}
	CF_MTX_UNLOCK(&sc->lock);

	/*
	 * We need to figure out the current level. Loop through every
	 * driver, getting the current setting. Then, attempt to get a best
	 * match of settings against each level.
	 */
	count = CF_MAX_LEVELS;
	levels = malloc(count * sizeof(*levels), M_TEMP, M_NOWAIT);
	if (levels == NULL)
		return (ENOMEM);
	error = CPUFREQ_LEVELS(sc->dev, levels, &count);
	if (error) {
		if (error == E2BIG)
			printf("cpufreq: need to increase CF_MAX_LEVELS\n");
		free(levels, M_TEMP);
		return (error);
	}
	error = device_get_children(device_get_parent(dev), &devs, &numdevs);
	if (error) {
		free(levels, M_TEMP);
		return (error);
	}

	/*
	 * Reacquire the lock and search for the given level.
	 *
	 * XXX Note: this is not quite right since we really need to go
	 * through each level and compare both absolute and relative
	 * settings for each driver in the system before making a match.
	 * The estimation code below catches this case though.
	 */
	CF_MTX_LOCK(&sc->lock);
	for (n = 0; n < numdevs && curr_set->freq == CPUFREQ_VAL_UNKNOWN; n++) {
		if (!device_is_attached(devs[n]))
			continue;
		error = CPUFREQ_DRV_GET(devs[n], &set);
		if (error)
			continue;
		for (i = 0; i < count; i++) {
			if (CPUFREQ_CMP(set.freq, levels[i].total_set.freq)) {
				sc->curr_level = levels[i];
				break;
			}
		}
	}
	free(devs, M_TEMP);
	if (curr_set->freq != CPUFREQ_VAL_UNKNOWN) {
		CF_DEBUG("get matched freq %d from drivers\n", curr_set->freq);
		goto out;
	}

	/*
	 * We couldn't find an exact match, so attempt to estimate and then
	 * match against a level.
	 */
	pc = cpu_get_pcpu(dev);
	if (pc == NULL) {
		error = ENXIO;
		goto out;
	}
	cpu_est_clockrate(pc->pc_cpuid, &rate);
	rate /= 1000000;
	for (i = 0; i < count; i++) {
		if (CPUFREQ_CMP(rate, levels[i].total_set.freq)) {
			sc->curr_level = levels[i];
			CF_DEBUG("get estimated freq %d\n", curr_set->freq);
			break;
		}
	}

out:
	if (error == 0)
		*level = sc->curr_level;

	CF_MTX_UNLOCK(&sc->lock);
	if (levels)
		free(levels, M_TEMP);
	return (error);
}

static int
cf_levels_method(device_t dev, struct cf_level *levels, int *count)
{
	struct cf_setting_array *set_arr;
	struct cf_setting_lst rel_sets;
	struct cpufreq_softc *sc;
	struct cf_level *lev;
	struct cf_setting *sets;
	struct pcpu *pc;
	device_t *devs;
	int error, i, numdevs, set_count, type;
	uint64_t rate;

	if (levels == NULL || count == NULL)
		return (EINVAL);

	TAILQ_INIT(&rel_sets);
	sc = device_get_softc(dev);
	error = device_get_children(device_get_parent(dev), &devs, &numdevs);
	if (error)
		return (error);
	sets = malloc(MAX_SETTINGS * sizeof(*sets), M_TEMP, M_NOWAIT);
	if (sets == NULL) {
		free(devs, M_TEMP);
		return (ENOMEM);
	}

	/* Get settings from all cpufreq drivers. */
	CF_MTX_LOCK(&sc->lock);
	for (i = 0; i < numdevs; i++) {
		/* Skip devices that aren't ready. */
		if (!device_is_attached(devs[i]))
			continue;

		/*
		 * Get settings, skipping drivers that offer no settings or
		 * provide settings for informational purposes only.
		 */
		error = CPUFREQ_DRV_TYPE(devs[i], &type);
		if (error || (type & CPUFREQ_FLAG_INFO_ONLY)) {
			if (error == 0) {
				CF_DEBUG("skipping info-only driver %s\n",
				    device_get_nameunit(devs[i]));
			}
			continue;
		}
		set_count = MAX_SETTINGS;
		error = CPUFREQ_DRV_SETTINGS(devs[i], sets, &set_count);
		if (error || set_count == 0)
			continue;

		/* Add the settings to our absolute/relative lists. */
		switch (type & CPUFREQ_TYPE_MASK) {
		case CPUFREQ_TYPE_ABSOLUTE:
			error = cpufreq_insert_abs(sc, sets, set_count);
			break;
		case CPUFREQ_TYPE_RELATIVE:
			CF_DEBUG("adding %d relative settings\n", set_count);
			set_arr = malloc(sizeof(*set_arr), M_TEMP, M_NOWAIT);
			if (set_arr == NULL) {
				error = ENOMEM;
				goto out;
			}
			bcopy(sets, set_arr->sets, set_count * sizeof(*sets));
			set_arr->count = set_count;
			TAILQ_INSERT_TAIL(&rel_sets, set_arr, link);
			break;
		default:
			error = EINVAL;
		}
		if (error)
			goto out;
	}

	/*
	 * If there are no absolute levels, create a fake one at 100%. We
	 * then cache the clockrate for later use as our base frequency.
	 *
	 * XXX This assumes that the first time through, if we only have
	 * relative drivers, the CPU is currently running at 100%.
	 */
	if (TAILQ_EMPTY(&sc->all_levels)) {
		if (sc->max_mhz == CPUFREQ_VAL_UNKNOWN) {
			pc = cpu_get_pcpu(dev);
			cpu_est_clockrate(pc->pc_cpuid, &rate);
			sc->max_mhz = rate / 1000000;
		}
		memset(&sets[0], CPUFREQ_VAL_UNKNOWN, sizeof(*sets));
		sets[0].freq = sc->max_mhz;
		sets[0].dev = NULL;
		error = cpufreq_insert_abs(sc, sets, 1);
		if (error)
			goto out;
	}

	/* Create a combined list of absolute + relative levels. */
	TAILQ_FOREACH(set_arr, &rel_sets, link)
		cpufreq_expand_set(sc, set_arr);

	/* If the caller doesn't have enough space, return the actual count. */
	if (sc->all_count > *count) {
		*count = sc->all_count;
		error = E2BIG;
		goto out;
	}

	/* Finally, output the list of levels. */
	i = 0;
	TAILQ_FOREACH(lev, &sc->all_levels, link) {
		/* Skip levels that have a frequency that is too low. */
		if (lev->total_set.freq < cf_lowest_freq) {
			sc->all_count--;
			continue;
		}

		levels[i] = *lev;
		i++;
	}
	*count = sc->all_count;
	error = 0;

out:
	/* Clear all levels since we regenerate them each time. */
	while ((lev = TAILQ_FIRST(&sc->all_levels)) != NULL) {
		TAILQ_REMOVE(&sc->all_levels, lev, link);
		free(lev, M_TEMP);
	}
	sc->all_count = 0;

	CF_MTX_UNLOCK(&sc->lock);
	while ((set_arr = TAILQ_FIRST(&rel_sets)) != NULL) {
		TAILQ_REMOVE(&rel_sets, set_arr, link);
		free(set_arr, M_TEMP);
	}
	free(devs, M_TEMP);
	free(sets, M_TEMP);
	return (error);
}

/*
 * Create levels for an array of absolute settings and insert them in
 * sorted order in the specified list.
 */
static int
cpufreq_insert_abs(struct cpufreq_softc *sc, struct cf_setting *sets,
    int count)
{
	struct cf_level_lst *list;
	struct cf_level *level, *search;
	int i;

	CF_MTX_ASSERT(&sc->lock);

	list = &sc->all_levels;
	for (i = 0; i < count; i++) {
		level = malloc(sizeof(*level), M_TEMP, M_NOWAIT | M_ZERO);
		if (level == NULL)
			return (ENOMEM);
		level->abs_set = sets[i];
		level->total_set = sets[i];
		level->total_set.dev = NULL;
		sc->all_count++;

		if (TAILQ_EMPTY(list)) {
			CF_DEBUG("adding abs setting %d at head\n",
			    sets[i].freq);
			TAILQ_INSERT_HEAD(list, level, link);
			continue;
		}

		TAILQ_FOREACH_REVERSE(search, list, cf_level_lst, link) {
			if (sets[i].freq <= search->total_set.freq) {
				CF_DEBUG("adding abs setting %d after %d\n",
				    sets[i].freq, search->total_set.freq);
				TAILQ_INSERT_AFTER(list, search, level, link);
				break;
			}
		}
	}
	return (0);
}

/*
 * Expand a group of relative settings, creating derived levels from them.
 */
static int
cpufreq_expand_set(struct cpufreq_softc *sc, struct cf_setting_array *set_arr)
{
	struct cf_level *fill, *search;
	struct cf_setting *set;
	int i;

	CF_MTX_ASSERT(&sc->lock);

	/*
	 * Walk the set of all existing levels in reverse. This is so we
	 * create derived states from the lowest absolute settings first
	 * and discard duplicates created from higher absolute settings.
	 * For instance, a level of 50 MHz derived from 100 MHz + 50% is
	 * preferable to 200 MHz + 25% because absolute settings are more
	 * efficient since they often change the voltage as well.
	 */
	TAILQ_FOREACH_REVERSE(search, &sc->all_levels, cf_level_lst, link) {
		/* Add each setting to the level, duplicating if necessary. */
		for (i = 0; i < set_arr->count; i++) {
			set = &set_arr->sets[i];

			/*
			 * If this setting is less than 100%, split the level
			 * into two and add this setting to the new level.
			 */
			fill = search;
			if (set->freq < 10000) {
				fill = cpufreq_dup_set(sc, search, set);

				/*
				 * The new level was a duplicate of an
				 * existing level, or its absolute setting
				 * was too high, so we freed it. For example,
				 * we discard a derived level of 1000 MHz/25%
				 * if a level of 500 MHz/100% already exists.
				 */
				if (fill == NULL)
					break;
			}

			/* Add this setting to the existing or new level. */
			KASSERT(fill->rel_count < MAX_SETTINGS,
			    ("cpufreq: too many relative drivers (%d)",
			    MAX_SETTINGS));
			fill->rel_set[fill->rel_count] = *set;
			fill->rel_count++;
			CF_DEBUG(
			"expand set added rel setting %d%% to %d level\n",
			    set->freq / 100, fill->total_set.freq);
		}
	}

	return (0);
}

static struct cf_level *
cpufreq_dup_set(struct cpufreq_softc *sc, struct cf_level *dup,
    struct cf_setting *set)
{
	struct cf_level_lst *list;
	struct cf_level *fill, *itr;
	struct cf_setting *fill_set, *itr_set;
	int i;

	CF_MTX_ASSERT(&sc->lock);

	/*
	 * Create a new level, copy it from the old one, and update the
	 * total frequency and power by the percentage specified in the
	 * relative setting.
	 */
	fill = malloc(sizeof(*fill), M_TEMP, M_NOWAIT);
	if (fill == NULL)
		return (NULL);
	*fill = *dup;
	fill_set = &fill->total_set;
	fill_set->freq =
	    ((uint64_t)fill_set->freq * set->freq) / 10000;
	if (fill_set->power != CPUFREQ_VAL_UNKNOWN) {
		fill_set->power = ((uint64_t)fill_set->power * set->freq)
		    / 10000;
	}
	if (set->lat != CPUFREQ_VAL_UNKNOWN) {
		if (fill_set->lat != CPUFREQ_VAL_UNKNOWN)
			fill_set->lat += set->lat;
		else
			fill_set->lat = set->lat;
	}
	CF_DEBUG("dup set considering derived setting %d\n", fill_set->freq);

	/*
	 * If we copied an old level that we already modified (say, at 100%),
	 * we need to remove that setting before adding this one. Since we
	 * process each setting array in order, we know any settings for this
	 * driver will be found at the end.
	 */
	for (i = fill->rel_count; i != 0; i--) {
		if (fill->rel_set[i - 1].dev != set->dev)
			break;
		CF_DEBUG("removed last relative driver: %s\n",
		    device_get_nameunit(set->dev));
		fill->rel_count--;
	}

	/*
	 * Insert the new level in sorted order. If it is a duplicate of an
	 * existing level (1) or has an absolute setting higher than the
	 * existing level (2), do not add it. We can do this since any such
	 * level is guaranteed to use less power. For example (1), a level
	 * with one absolute setting of 800 MHz uses less power than one
	 * composed of an absolute setting of 1600 MHz and a relative setting
	 * at 50%. Also for example (2), a level of 800 MHz/75% is preferable
	 * to 1600 MHz/25% even though the latter has a lower total frequency.
	 */
	list = &sc->all_levels;
	KASSERT(!TAILQ_EMPTY(list), ("all levels list empty in dup set"));
	TAILQ_FOREACH_REVERSE(itr, list, cf_level_lst, link) {
		itr_set = &itr->total_set;
		if (CPUFREQ_CMP(fill_set->freq, itr_set->freq)) {
			CF_DEBUG("dup set rejecting %d (dupe)\n",
			    fill_set->freq);
			itr = NULL;
			break;
		} else if (fill_set->freq < itr_set->freq) {
			if (fill->abs_set.freq <= itr->abs_set.freq) {
				CF_DEBUG(
			"dup done, inserting new level %d after %d\n",
				    fill_set->freq, itr_set->freq);
				TAILQ_INSERT_AFTER(list, itr, fill, link);
				sc->all_count++;
			} else {
				CF_DEBUG("dup set rejecting %d (abs too big)\n",
				    fill_set->freq);
				itr = NULL;
			}
			break;
		}
	}

	/* We didn't find a good place for this new level so free it. */
	if (itr == NULL) {
		CF_DEBUG("dup set freeing new level %d (not optimal)\n",
		    fill_set->freq);
		free(fill, M_TEMP);
		fill = NULL;
	}

	return (fill);
}
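
/*
 * Worked example for the scaling above (illustrative numbers): relative
 * settings are expressed in hundredths of a percent, so 10000 means 100%
 * and 5000 means 50%. Duplicating a 2000 MHz absolute level with a 5000
 * relative setting yields a derived level of (2000 * 5000) / 10000 =
 * 1000 MHz; power is scaled by the same ratio and the relative setting's
 * latency is added to the level's total latency.
 */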

static int
cpufreq_curr_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct cpufreq_softc *sc;
	struct cf_level *levels;
	int count, devcount, error, freq, i, n;
	device_t *devs;

	devs = NULL;
	sc = oidp->oid_arg1;
	levels = malloc(CF_MAX_LEVELS * sizeof(*levels), M_TEMP, M_NOWAIT);
	if (levels == NULL)
		return (ENOMEM);

	error = CPUFREQ_GET(sc->dev, &levels[0]);
	if (error)
		goto out;
	freq = levels[0].total_set.freq;
	error = sysctl_handle_int(oidp, &freq, 0, req);
	if (error != 0 || req->newptr == NULL)
		goto out;

	/*
	 * While we only call cpufreq_get() on one device (assuming all
	 * CPUs have equal levels), we call cpufreq_set() on all CPUs.
	 * This is needed for some MP systems.
	 */
	error = devclass_get_devices(cpufreq_dc, &devs, &devcount);
	if (error)
		goto out;
	for (n = 0; n < devcount; n++) {
		count = CF_MAX_LEVELS;
		error = CPUFREQ_LEVELS(devs[n], levels, &count);
		if (error) {
			if (error == E2BIG)
				printf(
				"cpufreq: need to increase CF_MAX_LEVELS\n");
			break;
		}
		for (i = 0; i < count; i++) {
			if (CPUFREQ_CMP(levels[i].total_set.freq, freq)) {
				error = CPUFREQ_SET(devs[n], &levels[i],
				    CPUFREQ_PRIO_USER);
				break;
			}
		}
		if (i == count) {
			error = EINVAL;
			break;
		}
	}

out:
	if (devs)
		free(devs, M_TEMP);
	if (levels)
		free(levels, M_TEMP);
	return (error);
}

static int
cpufreq_levels_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct cpufreq_softc *sc;
	struct cf_level *levels;
	struct cf_setting *set;
	struct sbuf sb;
	int count, error, i;

	sc = oidp->oid_arg1;
	sbuf_new(&sb, NULL, 128, SBUF_AUTOEXTEND);

	/* Get settings from the device and generate the output string. */
	count = CF_MAX_LEVELS;
	levels = malloc(count * sizeof(*levels), M_TEMP, M_NOWAIT);
	if (levels == NULL)
		return (ENOMEM);
	error = CPUFREQ_LEVELS(sc->dev, levels, &count);
	if (error) {
		if (error == E2BIG)
			printf("cpufreq: need to increase CF_MAX_LEVELS\n");
		goto out;
	}
	if (count) {
		for (i = 0; i < count; i++) {
			set = &levels[i].total_set;
			sbuf_printf(&sb, "%d/%d ", set->freq, set->power);
		}
	} else
		sbuf_cpy(&sb, "0");
	sbuf_trim(&sb);
	sbuf_finish(&sb);
	error = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);

out:
	free(levels, M_TEMP);
	sbuf_delete(&sb);
	return (error);
}

static int
cpufreq_settings_sysctl(SYSCTL_HANDLER_ARGS)
{
	device_t dev;
	struct cf_setting *sets;
	struct sbuf sb;
	int error, i, set_count;

	dev = oidp->oid_arg1;
	sbuf_new(&sb, NULL, 128, SBUF_AUTOEXTEND);

	/* Get settings from the device and generate the output string. */
	set_count = MAX_SETTINGS;
	sets = malloc(set_count * sizeof(*sets), M_TEMP, M_NOWAIT);
	if (sets == NULL)
		return (ENOMEM);
	error = CPUFREQ_DRV_SETTINGS(dev, sets, &set_count);
	if (error)
		goto out;
	if (set_count) {
		for (i = 0; i < set_count; i++)
			sbuf_printf(&sb, "%d/%d ", sets[i].freq, sets[i].power);
	} else
		sbuf_cpy(&sb, "0");
	sbuf_trim(&sb);
	sbuf_finish(&sb);
	error = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);

out:
	free(sets, M_TEMP);
	sbuf_delete(&sb);
	return (error);
}

int
cpufreq_register(device_t dev)
{
	struct cpufreq_softc *sc;
	device_t cf_dev, cpu_dev;

	/* Add a sysctl to get each driver's settings separately. */
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "freq_settings", CTLTYPE_STRING | CTLFLAG_RD, dev, 0,
	    cpufreq_settings_sysctl, "A", "CPU frequency driver settings");

	/*
	 * Add only one cpufreq device to each CPU. Currently, all CPUs
	 * must offer the same levels and be switched at the same time.
	 */
	cpu_dev = device_get_parent(dev);
	if ((cf_dev = device_find_child(cpu_dev, "cpufreq", -1))) {
		sc = device_get_softc(cf_dev);
		sc->max_mhz = CPUFREQ_VAL_UNKNOWN;
		return (0);
	}

	/* Add the child device and possibly sysctls. */
	cf_dev = BUS_ADD_CHILD(cpu_dev, 0, "cpufreq", -1);
	if (cf_dev == NULL)
		return (ENOMEM);
	device_quiet(cf_dev);

	return (device_probe_and_attach(cf_dev));
}

int
cpufreq_unregister(device_t dev)
{
	device_t cf_dev, *devs;
	int cfcount, devcount, error, i, type;

	/*
	 * If this is the last cpufreq child device, remove the control
	 * device as well. We identify cpufreq children by calling a method
	 * they support.
	 */
	error = device_get_children(device_get_parent(dev), &devs, &devcount);
	if (error)
		return (error);
	cf_dev = device_find_child(device_get_parent(dev), "cpufreq", -1);
	if (cf_dev == NULL) {
		device_printf(dev,
		    "warning: cpufreq_unregister called with no cpufreq device active\n");
		free(devs, M_TEMP);
		return (0);
	}
	cfcount = 0;
	for (i = 0; i < devcount; i++) {
		if (!device_is_attached(devs[i]))
			continue;
		if (CPUFREQ_DRV_TYPE(devs[i], &type) == 0)
			cfcount++;
	}
	if (cfcount <= 1)
		device_delete_child(device_get_parent(cf_dev), cf_dev);
	free(devs, M_TEMP);

	return (0);
}

int
cpufreq_settings_changed(device_t dev)
{

	EVENTHANDLER_INVOKE(cpufreq_levels_changed,
	    device_get_unit(device_get_parent(dev)));
	return (0);
}
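
/*
 * Usage sketch (illustrative, not compiled): a hardware-specific frequency
 * driver attaches under the cpu device, implements the cpufreq_if driver
 * methods used above (CPUFREQ_DRV_SETTINGS, CPUFREQ_DRV_SET, CPUFREQ_DRV_GET
 * and CPUFREQ_DRV_TYPE), and calls cpufreq_register() from its attach
 * routine so this glue creates the cpufreq child and its sysctls. If the
 * driver's available settings change at runtime, it calls
 * cpufreq_settings_changed() to fire the cpufreq_levels_changed event, and
 * it calls cpufreq_unregister() from its detach routine. Its method table
 * would look roughly like:
 *
 *	static device_method_t foo_freq_methods[] = {
 *		DEVMETHOD(device_probe,		foo_freq_probe),
 *		DEVMETHOD(device_attach,	foo_freq_attach),
 *		DEVMETHOD(cpufreq_drv_set,	foo_freq_set),
 *		DEVMETHOD(cpufreq_drv_get,	foo_freq_get),
 *		DEVMETHOD(cpufreq_drv_settings,	foo_freq_settings),
 *		DEVMETHOD(cpufreq_drv_type,	foo_freq_type),
 *		{0, 0}
 *	};
 *
 * where the foo_freq_* names are hypothetical.
 */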