/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2004-2007 Nate Lawson (SDG)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/sx.h>
#include <sys/timetc.h>
#include <sys/taskqueue.h>

#include "cpufreq_if.h"

/*
 * Common CPU frequency glue code.  Drivers for specific hardware can
 * attach this interface to allow users to get/set the CPU frequency.
 */

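/*
 * A hardware-specific frequency driver typically hooks into this glue by
 * calling cpufreq_register() from its attach routine and implementing the
 * cpufreq_if.m methods.  Illustrative sketch only; "foo" and its attach
 * details are hypothetical, not part of this file:
 *
 *	static int
 *	foo_attach(device_t dev)
 *	{
 *		... discover hardware, build the driver's setting table ...
 *		return (cpufreq_register(dev));
 *	}
 *
 * The driver then answers CPUFREQ_DRV_SET/GET/TYPE/SETTINGS requests and
 * calls cpufreq_settings_changed() whenever its available settings change.
 */
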
/*
 * Number of levels we can handle.  Levels are synthesized from settings
 * so for M settings and N drivers, there may be M*N levels.
 */
#define CF_MAX_LEVELS	256

struct cf_saved_freq {
	struct cf_level			level;
	int				priority;
	SLIST_ENTRY(cf_saved_freq)	link;
};

struct cpufreq_softc {
	struct sx			lock;
	struct cf_level			curr_level;
	int				curr_priority;
	SLIST_HEAD(, cf_saved_freq)	saved_freq;
	struct cf_level_lst		all_levels;
	int				all_count;
	int				max_mhz;
	device_t			dev;
	device_t			cf_drv_dev;
	struct sysctl_ctx_list		sysctl_ctx;
	struct task			startup_task;
	struct cf_level			*levels_buf;
};

struct cf_setting_array {
	struct cf_setting		sets[MAX_SETTINGS];
	int				count;
	TAILQ_ENTRY(cf_setting_array)	link;
};

TAILQ_HEAD(cf_setting_lst, cf_setting_array);

#define CF_MTX_INIT(x)		sx_init((x), "cpufreq lock")
#define CF_MTX_LOCK(x)		sx_xlock((x))
#define CF_MTX_UNLOCK(x)	sx_xunlock((x))
#define CF_MTX_ASSERT(x)	sx_assert((x), SX_XLOCKED)

#define CF_DEBUG(msg...)	do {		\
	if (cf_verbose)				\
		printf("cpufreq: " msg);	\
} while (0)

static int	cpufreq_attach(device_t dev);
static void	cpufreq_startup_task(void *ctx, int pending);
static int	cpufreq_detach(device_t dev);
static int	cf_set_method(device_t dev, const struct cf_level *level,
		    int priority);
static int	cf_get_method(device_t dev, struct cf_level *level);
static int	cf_levels_method(device_t dev, struct cf_level *levels,
		    int *count);
static int	cpufreq_insert_abs(struct cpufreq_softc *sc,
		    struct cf_setting *sets, int count);
static int	cpufreq_expand_set(struct cpufreq_softc *sc,
		    struct cf_setting_array *set_arr);
static struct cf_level *cpufreq_dup_set(struct cpufreq_softc *sc,
		    struct cf_level *dup, struct cf_setting *set);
static int	cpufreq_curr_sysctl(SYSCTL_HANDLER_ARGS);
static int	cpufreq_levels_sysctl(SYSCTL_HANDLER_ARGS);
static int	cpufreq_settings_sysctl(SYSCTL_HANDLER_ARGS);

static device_method_t cpufreq_methods[] = {
	DEVMETHOD(device_probe,		bus_generic_probe),
	DEVMETHOD(device_attach,	cpufreq_attach),
	DEVMETHOD(device_detach,	cpufreq_detach),

	DEVMETHOD(cpufreq_set,		cf_set_method),
	DEVMETHOD(cpufreq_get,		cf_get_method),
	DEVMETHOD(cpufreq_levels,	cf_levels_method),
	{0, 0}
};

static driver_t cpufreq_driver = {
	"cpufreq", cpufreq_methods, sizeof(struct cpufreq_softc)
};

static devclass_t cpufreq_dc;
DRIVER_MODULE(cpufreq, cpu, cpufreq_driver, cpufreq_dc, 0, 0);

static int	cf_lowest_freq;
static int	cf_verbose;
static SYSCTL_NODE(_debug, OID_AUTO, cpufreq, CTLFLAG_RD, NULL,
    "cpufreq debugging");
SYSCTL_INT(_debug_cpufreq, OID_AUTO, lowest, CTLFLAG_RWTUN, &cf_lowest_freq, 1,
    "Don't provide levels below this frequency.");
SYSCTL_INT(_debug_cpufreq, OID_AUTO, verbose, CTLFLAG_RWTUN, &cf_verbose, 1,
    "Print verbose debugging messages");

/*
 * This is called as the result of a hardware-specific frequency control
 * driver calling cpufreq_register.  It provides a general interface for
 * system-wide frequency controls and operates on a per-CPU basis.
 */
static int
cpufreq_attach(device_t dev)
{
	struct cpufreq_softc *sc;
	struct pcpu *pc;
	device_t parent;
	uint64_t rate;

	CF_DEBUG("initializing %s\n", device_get_nameunit(dev));
	sc = device_get_softc(dev);
	parent = device_get_parent(dev);
	sc->dev = dev;
	sysctl_ctx_init(&sc->sysctl_ctx);
	TAILQ_INIT(&sc->all_levels);
	CF_MTX_INIT(&sc->lock);
	sc->curr_level.total_set.freq = CPUFREQ_VAL_UNKNOWN;
	SLIST_INIT(&sc->saved_freq);
	/* Try to get nominal CPU freq to use it as maximum later if needed */
	sc->max_mhz = cpu_get_nominal_mhz(dev);
	/* If that fails, try to measure the current rate */
	if (sc->max_mhz <= 0) {
		CF_DEBUG("Unable to obtain nominal frequency.\n");
		pc = cpu_get_pcpu(dev);
		if (cpu_est_clockrate(pc->pc_cpuid, &rate) == 0)
			sc->max_mhz = rate / 1000000;
		else
			sc->max_mhz = CPUFREQ_VAL_UNKNOWN;
	}

	CF_DEBUG("initializing one-time data for %s\n",
	    device_get_nameunit(dev));
	sc->levels_buf = malloc(CF_MAX_LEVELS * sizeof(*sc->levels_buf),
	    M_DEVBUF, M_WAITOK);
	SYSCTL_ADD_PROC(&sc->sysctl_ctx,
	    SYSCTL_CHILDREN(device_get_sysctl_tree(parent)),
	    OID_AUTO, "freq", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
	    cpufreq_curr_sysctl, "I", "Current CPU frequency");
	SYSCTL_ADD_PROC(&sc->sysctl_ctx,
	    SYSCTL_CHILDREN(device_get_sysctl_tree(parent)),
	    OID_AUTO, "freq_levels", CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
	    cpufreq_levels_sysctl, "A", "CPU frequency levels");

	/*
	 * Queue a one-shot broadcast that levels have changed.
	 * It will run once the system has completed booting.
	 */
	TASK_INIT(&sc->startup_task, 0, cpufreq_startup_task, dev);
	taskqueue_enqueue(taskqueue_thread, &sc->startup_task);

	return (0);
}

/* Handle any work to be done for all drivers that attached during boot. */
static void
cpufreq_startup_task(void *ctx, int pending)
{

	cpufreq_settings_changed((device_t)ctx);
}

static int
cpufreq_detach(device_t dev)
{
	struct cpufreq_softc *sc;
	struct cf_saved_freq *saved_freq;

	CF_DEBUG("shutdown %s\n", device_get_nameunit(dev));
	sc = device_get_softc(dev);
	sysctl_ctx_free(&sc->sysctl_ctx);

	while ((saved_freq = SLIST_FIRST(&sc->saved_freq)) != NULL) {
		SLIST_REMOVE_HEAD(&sc->saved_freq, link);
		free(saved_freq, M_TEMP);
	}

	free(sc->levels_buf, M_DEVBUF);

	return (0);
}

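/*
 * Switch to the given level on behalf of a consumer at the given priority.
 * A NULL level means "restore the most recently saved level, if any."
 * Requests with a priority lower than the currently active one are rejected.
 */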
static int
cf_set_method(device_t dev, const struct cf_level *level, int priority)
{
	struct cpufreq_softc *sc;
	const struct cf_setting *set;
	struct cf_saved_freq *saved_freq, *curr_freq;
	struct pcpu *pc;
	int error, i;
	u_char pri;

	sc = device_get_softc(dev);
	error = 0;
	set = NULL;
	saved_freq = NULL;

	/* We are going to change levels so notify the pre-change handler. */
	EVENTHANDLER_INVOKE(cpufreq_pre_change, level, &error);
	if (error != 0) {
		EVENTHANDLER_INVOKE(cpufreq_post_change, level, error);
		return (error);
	}

	CF_MTX_LOCK(&sc->lock);

#ifdef SMP
#ifdef EARLY_AP_STARTUP
	MPASS(mp_ncpus == 1 || smp_started);
#else
	/*
	 * If still booting and secondary CPUs not started yet, don't allow
	 * changing the frequency until they're online.  This is because we
	 * can't switch to them using sched_bind() and thus we'd only be
	 * switching the main CPU.  XXXTODO: Need to think more about how to
	 * handle having different CPUs at different frequencies.
	 */
	if (mp_ncpus > 1 && !smp_started) {
		device_printf(dev, "rejecting change, SMP not started yet\n");
		error = ENXIO;
		goto out;
	}
#endif
#endif /* SMP */

	/*
	 * If the requested level has a lower priority, don't allow
	 * the new level right now.
	 */
	if (priority < sc->curr_priority) {
		CF_DEBUG("ignoring, curr prio %d less than %d\n", priority,
		    sc->curr_priority);
		error = EPERM;
		goto out;
	}

	/*
	 * If the caller didn't specify a level and one is saved, prepare to
	 * restore the saved level.  If none has been saved, return an error.
	 */
	if (level == NULL) {
		saved_freq = SLIST_FIRST(&sc->saved_freq);
		if (saved_freq == NULL) {
			CF_DEBUG("NULL level, no saved level\n");
			error = ENXIO;
			goto out;
		}
		level = &saved_freq->level;
		priority = saved_freq->priority;
		CF_DEBUG("restoring saved level, freq %d prio %d\n",
		    level->total_set.freq, priority);
	}

	/* Reject levels that are below our specified threshold. */
	if (level->total_set.freq < cf_lowest_freq) {
		CF_DEBUG("rejecting freq %d, less than %d limit\n",
		    level->total_set.freq, cf_lowest_freq);
		error = EINVAL;
		goto out;
	}

	/* If already at this level, just return. */
	if (sc->curr_level.total_set.freq == level->total_set.freq) {
		CF_DEBUG("skipping freq %d, same as current level %d\n",
		    level->total_set.freq, sc->curr_level.total_set.freq);
		goto skip;
	}

	/* First, set the absolute frequency via its driver. */
	set = &level->abs_set;
	if (set->dev) {
		if (!device_is_attached(set->dev)) {
			error = ENXIO;
			goto out;
		}

		/* Bind to the target CPU before switching. */
		pc = cpu_get_pcpu(set->dev);
		thread_lock(curthread);
		pri = curthread->td_priority;
		sched_prio(curthread, PRI_MIN);
		sched_bind(curthread, pc->pc_cpuid);
		thread_unlock(curthread);
		CF_DEBUG("setting abs freq %d on %s (cpu %d)\n", set->freq,
		    device_get_nameunit(set->dev), PCPU_GET(cpuid));
		error = CPUFREQ_DRV_SET(set->dev, set);
		thread_lock(curthread);
		sched_unbind(curthread);
		sched_prio(curthread, pri);
		thread_unlock(curthread);
		if (error) {
			goto out;
		}
	}

	/* Next, set any/all relative frequencies via their drivers. */
	for (i = 0; i < level->rel_count; i++) {
		set = &level->rel_set[i];
		if (!device_is_attached(set->dev)) {
			error = ENXIO;
			goto out;
		}

		/* Bind to the target CPU before switching. */
		pc = cpu_get_pcpu(set->dev);
		thread_lock(curthread);
		pri = curthread->td_priority;
		sched_prio(curthread, PRI_MIN);
		sched_bind(curthread, pc->pc_cpuid);
		thread_unlock(curthread);
		CF_DEBUG("setting rel freq %d on %s (cpu %d)\n", set->freq,
		    device_get_nameunit(set->dev), PCPU_GET(cpuid));
		error = CPUFREQ_DRV_SET(set->dev, set);
		thread_lock(curthread);
		sched_unbind(curthread);
		sched_prio(curthread, pri);
		thread_unlock(curthread);
		if (error) {
			/* XXX Back out any successful setting? */
			goto out;
		}
	}

skip:
	/*
	 * Before recording the current level, check if we're going to a
	 * higher priority.  If so, save the previous level and priority.
	 */
	if (sc->curr_level.total_set.freq != CPUFREQ_VAL_UNKNOWN &&
	    priority > sc->curr_priority) {
		CF_DEBUG("saving level, freq %d prio %d\n",
		    sc->curr_level.total_set.freq, sc->curr_priority);
		curr_freq = malloc(sizeof(*curr_freq), M_TEMP, M_NOWAIT);
		if (curr_freq == NULL) {
			error = ENOMEM;
			goto out;
		}
		curr_freq->level = sc->curr_level;
		curr_freq->priority = sc->curr_priority;
		SLIST_INSERT_HEAD(&sc->saved_freq, curr_freq, link);
	}
	sc->curr_level = *level;
	sc->curr_priority = priority;

	/* If we were restoring a saved state, reset it to "unused". */
	if (saved_freq != NULL) {
		CF_DEBUG("resetting saved level\n");
		sc->curr_level.total_set.freq = CPUFREQ_VAL_UNKNOWN;
		SLIST_REMOVE_HEAD(&sc->saved_freq, link);
		free(saved_freq, M_TEMP);
	}

out:
	CF_MTX_UNLOCK(&sc->lock);

	/*
	 * We changed levels (or attempted to) so notify the post-change
	 * handler of new frequency or error.
	 */
	EVENTHANDLER_INVOKE(cpufreq_post_change, level, error);
	if (error && set)
		device_printf(set->dev, "set freq failed, err %d\n", error);

	return (error);
}

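/* Ask the driver for its current frequency; returns -1 if unavailable. */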
static int
cpufreq_get_frequency(device_t dev)
{
	struct cf_setting set;

	if (CPUFREQ_DRV_GET(dev, &set) != 0)
		return (-1);

	return (set.freq);
}

/* Returns the index into *levels with the match */
static int
cpufreq_get_level(device_t dev, struct cf_level *levels, int count)
{
	int i, freq;

	if ((freq = cpufreq_get_frequency(dev)) < 0)
		return (-1);
	for (i = 0; i < count; i++)
		if (freq == levels[i].total_set.freq)
			return (i);

	return (-1);
}

/*
 * Used by the cpufreq core, this function populates *level with the current
 * frequency, either from the cached value in sc->curr_level or, when the
 * lower-level driver has set the CPUFREQ_FLAG_UNCACHED flag, directly from
 * the driver itself.
 */
static int
cf_get_method(device_t dev, struct cf_level *level)
{
	struct cpufreq_softc *sc;
	struct cf_level *levels;
	struct cf_setting *curr_set;
	struct pcpu *pc;
	int bdiff, count, diff, error, i, type;
	uint64_t rate;

	sc = device_get_softc(dev);
	error = 0;
	levels = NULL;

	/*
	 * If we already know the current frequency, and the driver didn't ask
	 * for uncached usage, we're done.
	 */
	CF_MTX_LOCK(&sc->lock);
	curr_set = &sc->curr_level.total_set;
	error = CPUFREQ_DRV_TYPE(sc->cf_drv_dev, &type);
	if (error == 0 && (type & CPUFREQ_FLAG_UNCACHED)) {
		struct cf_setting set;

		/*
		 * If the driver wants to always report back the real
		 * frequency, first try the driver and if that fails, fall
		 * back to estimating.
		 */
		if (CPUFREQ_DRV_GET(sc->cf_drv_dev, &set) != 0)
			goto estimate;
		sc->curr_level.total_set = set;
		CF_DEBUG("get returning immediate freq %d\n", curr_set->freq);
		goto out;
	} else if (curr_set->freq != CPUFREQ_VAL_UNKNOWN) {
		CF_DEBUG("get returning known freq %d\n", curr_set->freq);
		error = 0;
		goto out;
	}
	CF_MTX_UNLOCK(&sc->lock);

	/*
	 * We need to figure out the current level.  Loop through every
	 * driver, getting the current setting.  Then, attempt to get a best
	 * match of settings against each level.
	 */
	count = CF_MAX_LEVELS;
	levels = malloc(count * sizeof(*levels), M_TEMP, M_NOWAIT);
	if (levels == NULL)
		return (ENOMEM);
	error = CPUFREQ_LEVELS(sc->dev, levels, &count);
	if (error) {
		if (error == E2BIG)
			printf("cpufreq: need to increase CF_MAX_LEVELS\n");
		free(levels, M_TEMP);
		return (error);
	}

	/*
	 * Reacquire the lock and search for the given level.
	 *
	 * XXX Note: this is not quite right since we really need to go
	 * through each level and compare both absolute and relative
	 * settings for each driver in the system before making a match.
	 * The estimation code below catches this case though.
	 */
	CF_MTX_LOCK(&sc->lock);
	i = cpufreq_get_level(sc->cf_drv_dev, levels, count);
	if (i >= 0)
		sc->curr_level = levels[i];
	else
		CF_DEBUG("Couldn't find supported level for %s\n",
		    device_get_nameunit(sc->cf_drv_dev));

	if (curr_set->freq != CPUFREQ_VAL_UNKNOWN) {
		CF_DEBUG("get matched freq %d from drivers\n", curr_set->freq);
		goto out;
	}

estimate:
	CF_MTX_ASSERT(&sc->lock);

	/*
	 * We couldn't find an exact match, so attempt to estimate and then
	 * match against a level.
	 */
	pc = cpu_get_pcpu(dev);
	if (pc == NULL) {
		error = ENXIO;
		goto out;
	}
	cpu_est_clockrate(pc->pc_cpuid, &rate);
	rate /= 1000000;
	bdiff = 1 << 30;
	for (i = 0; i < count; i++) {
		diff = abs(levels[i].total_set.freq - rate);
		if (diff < bdiff) {
			bdiff = diff;
			sc->curr_level = levels[i];
		}
	}
	CF_DEBUG("get estimated freq %d\n", curr_set->freq);

out:
	if (error == 0)
		*level = sc->curr_level;

	CF_MTX_UNLOCK(&sc->lock);
	if (levels)
		free(levels, M_TEMP);
	return (error);
}

/*
 * Either directly obtain settings from the cpufreq driver, or build a list of
 * relative settings to be integrated later against an absolute max.
 */
static int
cpufreq_add_levels(device_t cf_dev, struct cf_setting_lst *rel_sets)
{
	struct cf_setting_array *set_arr;
	struct cf_setting *sets;
	device_t dev;
	struct cpufreq_softc *sc;
	int type, set_count, error;

	sc = device_get_softc(cf_dev);
	dev = sc->cf_drv_dev;

	/* Skip devices that aren't ready. */
	if (!device_is_attached(cf_dev))
		return (0);

	/*
	 * Get settings, skipping drivers that offer no settings or
	 * provide settings for informational purposes only.
	 */
	error = CPUFREQ_DRV_TYPE(dev, &type);
	if (error != 0 || (type & CPUFREQ_FLAG_INFO_ONLY)) {
		if (error == 0) {
			CF_DEBUG("skipping info-only driver %s\n",
			    device_get_nameunit(cf_dev));
		}
		return (error);
	}

	sets = malloc(MAX_SETTINGS * sizeof(*sets), M_TEMP, M_NOWAIT);
	if (sets == NULL)
		return (ENOMEM);

	set_count = MAX_SETTINGS;
	error = CPUFREQ_DRV_SETTINGS(dev, sets, &set_count);
	if (error != 0 || set_count == 0)
		goto out;

	/* Add the settings to our absolute/relative lists. */
	switch (type & CPUFREQ_TYPE_MASK) {
	case CPUFREQ_TYPE_ABSOLUTE:
		error = cpufreq_insert_abs(sc, sets, set_count);
		break;
	case CPUFREQ_TYPE_RELATIVE:
		CF_DEBUG("adding %d relative settings\n", set_count);
		set_arr = malloc(sizeof(*set_arr), M_TEMP, M_NOWAIT);
		if (set_arr == NULL) {
			error = ENOMEM;
			goto out;
		}
		bcopy(sets, set_arr->sets, set_count * sizeof(*sets));
		set_arr->count = set_count;
		TAILQ_INSERT_TAIL(rel_sets, set_arr, link);
		break;
	default:
		error = EINVAL;
	}

out:
	free(sets, M_TEMP);
	return (error);
}

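/*
 * Build the list of supported levels: collect the attached driver's settings,
 * synthesize derived levels from any relative settings, drop levels below the
 * cf_lowest_freq threshold, and copy the result into the caller's array.
 * Returns E2BIG (with the required count) if the caller's buffer is too small.
 */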
static int
cf_levels_method(device_t dev, struct cf_level *levels, int *count)
{
	struct cf_setting_array *set_arr;
	struct cf_setting_lst rel_sets;
	struct cpufreq_softc *sc;
	struct cf_level *lev;
	struct pcpu *pc;
	int error, i;
	uint64_t rate;

	if (levels == NULL || count == NULL)
		return (EINVAL);

	TAILQ_INIT(&rel_sets);
	sc = device_get_softc(dev);

	CF_MTX_LOCK(&sc->lock);
	error = cpufreq_add_levels(sc->dev, &rel_sets);
	if (error)
		goto out;

	/*
	 * If there are no absolute levels, create a fake one at 100%.  We
	 * then cache the clockrate for later use as our base frequency.
	 */
	if (TAILQ_EMPTY(&sc->all_levels)) {
		struct cf_setting set;

		CF_DEBUG("No absolute levels returned by driver\n");

		if (sc->max_mhz == CPUFREQ_VAL_UNKNOWN) {
			sc->max_mhz = cpu_get_nominal_mhz(dev);
			/*
			 * If the CPU can't report a rate for 100%, hope
			 * the CPU is running at its nominal rate right now,
			 * and use that instead.
			 */
			if (sc->max_mhz <= 0) {
				pc = cpu_get_pcpu(dev);
				cpu_est_clockrate(pc->pc_cpuid, &rate);
				sc->max_mhz = rate / 1000000;
			}
		}
		memset(&set, CPUFREQ_VAL_UNKNOWN, sizeof(set));
		set.freq = sc->max_mhz;
		set.dev = NULL;
		error = cpufreq_insert_abs(sc, &set, 1);
		if (error)
			goto out;
	}

	/* Create a combined list of absolute + relative levels. */
	TAILQ_FOREACH(set_arr, &rel_sets, link)
		cpufreq_expand_set(sc, set_arr);

	/* If the caller doesn't have enough space, return the actual count. */
	if (sc->all_count > *count) {
		*count = sc->all_count;
		error = E2BIG;
		goto out;
	}

	/* Finally, output the list of levels. */
	i = 0;
	TAILQ_FOREACH(lev, &sc->all_levels, link) {
		/* Skip levels that have a frequency that is too low. */
		if (lev->total_set.freq < cf_lowest_freq) {
			sc->all_count--;
			continue;
		}

		levels[i] = *lev;
		i++;
	}
	*count = sc->all_count;
	error = 0;

out:
	/* Clear all levels since we regenerate them each time. */
	while ((lev = TAILQ_FIRST(&sc->all_levels)) != NULL) {
		TAILQ_REMOVE(&sc->all_levels, lev, link);
		free(lev, M_TEMP);
	}
	sc->all_count = 0;

	CF_MTX_UNLOCK(&sc->lock);
	while ((set_arr = TAILQ_FIRST(&rel_sets)) != NULL) {
		TAILQ_REMOVE(&rel_sets, set_arr, link);
		free(set_arr, M_TEMP);
	}
	return (error);
}

/*
 * Create levels for an array of absolute settings and insert them in
 * sorted order in the specified list.
 */
static int
cpufreq_insert_abs(struct cpufreq_softc *sc, struct cf_setting *sets,
    int count)
{
	struct cf_level_lst *list;
	struct cf_level *level, *search;
	int i, inserted;

	CF_MTX_ASSERT(&sc->lock);

	list = &sc->all_levels;
	for (i = 0; i < count; i++) {
		level = malloc(sizeof(*level), M_TEMP, M_NOWAIT | M_ZERO);
		if (level == NULL)
			return (ENOMEM);
		level->abs_set = sets[i];
		level->total_set = sets[i];
		level->total_set.dev = NULL;
		sc->all_count++;
		inserted = 0;

		if (TAILQ_EMPTY(list)) {
			CF_DEBUG("adding abs setting %d at head\n",
			    sets[i].freq);
			TAILQ_INSERT_HEAD(list, level, link);
			continue;
		}

		TAILQ_FOREACH_REVERSE(search, list, cf_level_lst, link)
			if (sets[i].freq <= search->total_set.freq) {
				CF_DEBUG("adding abs setting %d after %d\n",
				    sets[i].freq, search->total_set.freq);
				TAILQ_INSERT_AFTER(list, search, level, link);
				inserted = 1;
				break;
			}

		if (inserted == 0) {
			TAILQ_FOREACH(search, list, link)
				if (sets[i].freq >= search->total_set.freq) {
					CF_DEBUG(
					"adding abs setting %d before %d\n",
					    sets[i].freq,
					    search->total_set.freq);
					TAILQ_INSERT_BEFORE(search, level,
					    link);
					break;
				}
		}
	}

	return (0);
}

/*
 * Expand a group of relative settings, creating derived levels from them.
 */
static int
cpufreq_expand_set(struct cpufreq_softc *sc, struct cf_setting_array *set_arr)
{
	struct cf_level *fill, *search;
	struct cf_setting *set;
	int i;

	CF_MTX_ASSERT(&sc->lock);

	/*
	 * Walk the set of all existing levels in reverse.  This is so we
	 * create derived states from the lowest absolute settings first
	 * and discard duplicates created from higher absolute settings.
	 * For instance, a level of 50 MHz derived from 100 MHz + 50% is
	 * preferable to 200 MHz + 25% because absolute settings are more
	 * efficient since they often change the voltage as well.
	 */
	TAILQ_FOREACH_REVERSE(search, &sc->all_levels, cf_level_lst, link) {
		/* Add each setting to the level, duplicating if necessary. */
		for (i = 0; i < set_arr->count; i++) {
			set = &set_arr->sets[i];

			/*
			 * If this setting is less than 100%, split the level
			 * into two and add this setting to the new level.
			 */
			fill = search;
			if (set->freq < 10000) {
				fill = cpufreq_dup_set(sc, search, set);

				/*
				 * The new level was a duplicate of an existing
				 * level or its absolute setting is too high
				 * so we freed it.  For example, we discard a
				 * derived level of 1000 MHz/25% if a level
				 * of 500 MHz/100% already exists.
				 */
				if (fill == NULL)
					break;
			}

			/* Add this setting to the existing or new level. */
			KASSERT(fill->rel_count < MAX_SETTINGS,
			    ("cpufreq: too many relative drivers (%d)",
			    MAX_SETTINGS));
			fill->rel_set[fill->rel_count] = *set;
			fill->rel_count++;
			CF_DEBUG(
			"expand set added rel setting %d%% to %d level\n",
			    set->freq / 100, fill->total_set.freq);
		}
	}

	return (0);
}

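/*
 * Duplicate level "dup", scale its frequency/power by the relative setting
 * "set", and insert the derived level in sorted order.  Returns the new
 * level, or NULL if it was discarded as a duplicate or as less efficient
 * than an existing level.
 */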
static struct cf_level *
cpufreq_dup_set(struct cpufreq_softc *sc, struct cf_level *dup,
    struct cf_setting *set)
{
	struct cf_level_lst *list;
	struct cf_level *fill, *itr;
	struct cf_setting *fill_set, *itr_set;
	int i;

	CF_MTX_ASSERT(&sc->lock);

	/*
	 * Create a new level, copy it from the old one, and update the
	 * total frequency and power by the percentage specified in the
	 * relative setting.
	 */
	fill = malloc(sizeof(*fill), M_TEMP, M_NOWAIT);
	if (fill == NULL)
		return (NULL);
	*fill = *dup;
	fill_set = &fill->total_set;
	fill_set->freq =
	    ((uint64_t)fill_set->freq * set->freq) / 10000;
	if (fill_set->power != CPUFREQ_VAL_UNKNOWN) {
		fill_set->power = ((uint64_t)fill_set->power * set->freq)
		    / 10000;
	}
	if (set->lat != CPUFREQ_VAL_UNKNOWN) {
		if (fill_set->lat != CPUFREQ_VAL_UNKNOWN)
			fill_set->lat += set->lat;
		else
			fill_set->lat = set->lat;
	}
	CF_DEBUG("dup set considering derived setting %d\n", fill_set->freq);

	/*
	 * If we copied an old level that we already modified (say, at 100%),
	 * we need to remove that setting before adding this one.  Since we
	 * process each setting array in order, we know any settings for this
	 * driver will be found at the end.
	 */
	for (i = fill->rel_count; i != 0; i--) {
		if (fill->rel_set[i - 1].dev != set->dev)
			break;
		CF_DEBUG("removed last relative driver: %s\n",
		    device_get_nameunit(set->dev));
		fill->rel_count--;
	}

	/*
	 * Insert the new level in sorted order.  If it is a duplicate of an
	 * existing level (1) or has an absolute setting higher than the
	 * existing level (2), do not add it.  We can do this since any such
	 * level is guaranteed to use less power.  For example (1), a level
	 * with one absolute setting of 800 MHz uses less power than one
	 * composed of an absolute setting of 1600 MHz and a relative setting
	 * at 50%.  Also for example (2), a level of 800 MHz/75% is preferable
	 * to 1600 MHz/25% even though the latter has a lower total frequency.
	 */
	list = &sc->all_levels;
	KASSERT(!TAILQ_EMPTY(list), ("all levels list empty in dup set"));
	TAILQ_FOREACH_REVERSE(itr, list, cf_level_lst, link) {
		itr_set = &itr->total_set;
		if (CPUFREQ_CMP(fill_set->freq, itr_set->freq)) {
			CF_DEBUG("dup set rejecting %d (dupe)\n",
			    fill_set->freq);
			itr = NULL;
			break;
		} else if (fill_set->freq < itr_set->freq) {
			if (fill->abs_set.freq <= itr->abs_set.freq) {
				CF_DEBUG(
				"dup done, inserting new level %d after %d\n",
				    fill_set->freq, itr_set->freq);
				TAILQ_INSERT_AFTER(list, itr, fill, link);
				sc->all_count++;
			} else {
				CF_DEBUG("dup set rejecting %d (abs too big)\n",
				    fill_set->freq);
				itr = NULL;
			}
			break;
		}
	}

	/* We didn't find a good place for this new level so free it. */
	if (itr == NULL) {
		CF_DEBUG("dup set freeing new level %d (not optimal)\n",
		    fill_set->freq);
		free(fill, M_TEMP);
		fill = NULL;
	}

	return (fill);
}

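/*
 * Sysctl handler for dev.cpu.N.freq.  Reads report the current level; writes
 * pick the closest matching level and apply it to every cpufreq device at
 * CPUFREQ_PRIO_USER priority.
 */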
static int
cpufreq_curr_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct cpufreq_softc *sc;
	struct cf_level *levels;
	int best, count, diff, bdiff, devcount, error, freq, i, n;
	device_t *devs;

	devs = NULL;
	sc = oidp->oid_arg1;
	levels = sc->levels_buf;

	error = CPUFREQ_GET(sc->dev, &levels[0]);
	if (error)
		goto out;
	freq = levels[0].total_set.freq;
	error = sysctl_handle_int(oidp, &freq, 0, req);
	if (error != 0 || req->newptr == NULL)
		goto out;

	/*
	 * While we only call cpufreq_get() on one device (assuming all
	 * CPUs have equal levels), we call cpufreq_set() on all CPUs.
	 * This is needed for some MP systems.
	 */
	error = devclass_get_devices(cpufreq_dc, &devs, &devcount);
	if (error)
		goto out;
	for (n = 0; n < devcount; n++) {
		count = CF_MAX_LEVELS;
		error = CPUFREQ_LEVELS(devs[n], levels, &count);
		if (error) {
			if (error == E2BIG)
				printf(
				"cpufreq: need to increase CF_MAX_LEVELS\n");
			break;
		}
		best = 0;
		bdiff = 1 << 30;
		for (i = 0; i < count; i++) {
			diff = abs(levels[i].total_set.freq - freq);
			if (diff < bdiff) {
				bdiff = diff;
				best = i;
			}
		}
		error = CPUFREQ_SET(devs[n], &levels[best], CPUFREQ_PRIO_USER);
	}

out:
	if (devs)
		free(devs, M_TEMP);
	return (error);
}

static int
cpufreq_levels_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct cpufreq_softc *sc;
	struct cf_level *levels;
	struct cf_setting *set;
	struct sbuf sb;
	int count, error, i;

	sc = oidp->oid_arg1;
	sbuf_new(&sb, NULL, 128, SBUF_AUTOEXTEND);

	/* Get settings from the device and generate the output string. */
	count = CF_MAX_LEVELS;
	levels = sc->levels_buf;
	if (levels == NULL) {
		sbuf_delete(&sb);
		return (ENOMEM);
	}
	error = CPUFREQ_LEVELS(sc->dev, levels, &count);
	if (error) {
		if (error == E2BIG)
			printf("cpufreq: need to increase CF_MAX_LEVELS\n");
		goto out;
	}
	if (count) {
		for (i = 0; i < count; i++) {
			set = &levels[i].total_set;
			sbuf_printf(&sb, "%d/%d ", set->freq, set->power);
		}
	} else
		sbuf_cpy(&sb, "0");
	sbuf_trim(&sb);
	sbuf_finish(&sb);
	error = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);

out:
	sbuf_delete(&sb);
	return (error);
}

static int
cpufreq_settings_sysctl(SYSCTL_HANDLER_ARGS)
{
	device_t dev;
	struct cf_setting *sets;
	struct sbuf sb;
	int error, i, set_count;

	dev = oidp->oid_arg1;
	sbuf_new(&sb, NULL, 128, SBUF_AUTOEXTEND);

	/* Get settings from the device and generate the output string. */
	set_count = MAX_SETTINGS;
	sets = malloc(set_count * sizeof(*sets), M_TEMP, M_NOWAIT);
	if (sets == NULL) {
		sbuf_delete(&sb);
		return (ENOMEM);
	}
	error = CPUFREQ_DRV_SETTINGS(dev, sets, &set_count);
	if (error)
		goto out;
	if (set_count) {
		for (i = 0; i < set_count; i++)
			sbuf_printf(&sb, "%d/%d ", sets[i].freq, sets[i].power);
	} else
		sbuf_cpy(&sb, "0");
	sbuf_trim(&sb);
	sbuf_finish(&sb);
	error = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);

out:
	free(sets, M_TEMP);
	sbuf_delete(&sb);
	return (error);
}

static void
cpufreq_add_freq_driver_sysctl(device_t cf_dev)
{
	struct cpufreq_softc *sc;

	sc = device_get_softc(cf_dev);
	SYSCTL_ADD_CONST_STRING(&sc->sysctl_ctx,
	    SYSCTL_CHILDREN(device_get_sysctl_tree(cf_dev)), OID_AUTO,
	    "freq_driver", CTLFLAG_RD, device_get_nameunit(sc->cf_drv_dev),
	    "cpufreq driver used by this cpu");
}

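/*
 * Called by a hardware-specific driver to register itself as the frequency
 * control driver for its CPU.  Creates the cpufreq child device under the
 * CPU device (only one per CPU) and attaches the per-driver sysctls.
 */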
int
cpufreq_register(device_t dev)
{
	struct cpufreq_softc *sc;
	device_t cf_dev, cpu_dev;
	int error;

	/* Add a sysctl to get each driver's settings separately. */
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "freq_settings", CTLTYPE_STRING | CTLFLAG_RD, dev, 0,
	    cpufreq_settings_sysctl, "A", "CPU frequency driver settings");

	/*
	 * Add only one cpufreq device to each CPU.  Currently, all CPUs
	 * must offer the same levels and be switched at the same time.
	 */
	cpu_dev = device_get_parent(dev);
	if ((cf_dev = device_find_child(cpu_dev, "cpufreq", -1))) {
		sc = device_get_softc(cf_dev);
		sc->max_mhz = CPUFREQ_VAL_UNKNOWN;
		MPASS(sc->cf_drv_dev != NULL);
		return (0);
	}

	/* Add the child device and possibly sysctls. */
	cf_dev = BUS_ADD_CHILD(cpu_dev, 0, "cpufreq", -1);
	if (cf_dev == NULL)
		return (ENOMEM);
	device_quiet(cf_dev);

	error = device_probe_and_attach(cf_dev);
	if (error)
		return (error);

	sc = device_get_softc(cf_dev);
	sc->cf_drv_dev = dev;
	cpufreq_add_freq_driver_sysctl(cf_dev);
	return (error);
}

int
cpufreq_unregister(device_t dev)
{
	device_t cf_dev;
	struct cpufreq_softc *sc;

	/*
	 * If this CPU has a cpufreq child device, remove it now that the
	 * hardware driver backing it is going away.
	 */
	cf_dev = device_find_child(device_get_parent(dev), "cpufreq", -1);
	if (cf_dev == NULL) {
		device_printf(dev,
	"warning: cpufreq_unregister called with no cpufreq device active\n");
		return (0);
	}
	sc = device_get_softc(cf_dev);
	MPASS(sc->cf_drv_dev == dev);
	device_delete_child(device_get_parent(cf_dev), cf_dev);

	return (0);
}

int
cpufreq_settings_changed(device_t dev)
{

	EVENTHANDLER_INVOKE(cpufreq_levels_changed,
	    device_get_unit(device_get_parent(dev)));
	return (0);
}