/*-
 * Copyright (c) 2004-2005 Nate Lawson (SDG)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/sbuf.h>
#include <sys/sx.h>
#include <sys/timetc.h>

#include "cpufreq_if.h"

/*
 * Common CPU frequency glue code.  Drivers for specific hardware can
 * attach this interface to allow users to get/set the CPU frequency.
 */

/*
 * Number of levels we can handle.  Levels are synthesized from settings
 * so for M settings and N drivers, there may be M*N levels.
 */
#define CF_MAX_LEVELS	64

struct cf_saved_freq {
	struct cf_level			level;
	int				priority;
	SLIST_ENTRY(cf_saved_freq)	link;
};

struct cpufreq_softc {
	struct sx			lock;
	struct cf_level			curr_level;
	int				curr_priority;
	SLIST_HEAD(, cf_saved_freq)	saved_freq;
	struct cf_level_lst		all_levels;
	int				all_count;
	int				max_mhz;
	device_t			dev;
	struct sysctl_ctx_list		sysctl_ctx;
};

struct cf_setting_array {
	struct cf_setting		sets[MAX_SETTINGS];
	int				count;
	TAILQ_ENTRY(cf_setting_array)	link;
};

TAILQ_HEAD(cf_setting_lst, cf_setting_array);

#define CF_MTX_INIT(x)		sx_init((x), "cpufreq lock")
#define CF_MTX_LOCK(x)		sx_xlock((x))
#define CF_MTX_UNLOCK(x)	sx_xunlock((x))
#define CF_MTX_ASSERT(x)	sx_assert((x), SX_XLOCKED)

#define CF_DEBUG(msg...)	do {		\
	if (cf_verbose)				\
		printf("cpufreq: " msg);	\
} while (0)

static int	cpufreq_attach(device_t dev);
static int	cpufreq_detach(device_t dev);
static void	cpufreq_evaluate(void *arg);
static int	cf_set_method(device_t dev, const struct cf_level *level,
		    int priority);
static int	cf_get_method(device_t dev, struct cf_level *level);
static int	cf_levels_method(device_t dev, struct cf_level *levels,
		    int *count);
static int	cpufreq_insert_abs(struct cpufreq_softc *sc,
		    struct cf_setting *sets, int count);
static int	cpufreq_expand_set(struct cpufreq_softc *sc,
		    struct cf_setting_array *set_arr);
static struct cf_level *cpufreq_dup_set(struct cpufreq_softc *sc,
		    struct cf_level *dup, struct cf_setting *set);
static int	cpufreq_curr_sysctl(SYSCTL_HANDLER_ARGS);
static int	cpufreq_levels_sysctl(SYSCTL_HANDLER_ARGS);
static int	cpufreq_settings_sysctl(SYSCTL_HANDLER_ARGS);

static device_method_t cpufreq_methods[] = {
	DEVMETHOD(device_probe,		bus_generic_probe),
	DEVMETHOD(device_attach,	cpufreq_attach),
	DEVMETHOD(device_detach,	cpufreq_detach),

	DEVMETHOD(cpufreq_set,		cf_set_method),
	DEVMETHOD(cpufreq_get,		cf_get_method),
	DEVMETHOD(cpufreq_levels,	cf_levels_method),
	{0, 0}
};
static driver_t cpufreq_driver = {
	"cpufreq", cpufreq_methods, sizeof(struct cpufreq_softc)
};
static devclass_t cpufreq_dc;
DRIVER_MODULE(cpufreq, cpu, cpufreq_driver, cpufreq_dc, 0, 0);

static eventhandler_tag cf_ev_tag;

static int	cf_lowest_freq;
static int	cf_verbose;
TUNABLE_INT("debug.cpufreq.lowest", &cf_lowest_freq);
TUNABLE_INT("debug.cpufreq.verbose", &cf_verbose);
SYSCTL_NODE(_debug, OID_AUTO, cpufreq, CTLFLAG_RD, NULL, "cpufreq debugging");
SYSCTL_INT(_debug_cpufreq, OID_AUTO, lowest, CTLFLAG_RW, &cf_lowest_freq, 1,
    "Don't provide levels below this frequency.");
SYSCTL_INT(_debug_cpufreq, OID_AUTO, verbose, CTLFLAG_RW, &cf_verbose, 1,
    "Print verbose debugging messages");

static int
cpufreq_attach(device_t dev)
{
	struct cpufreq_softc *sc;
	device_t parent;
	int numdevs;

	CF_DEBUG("initializing %s\n", device_get_nameunit(dev));
	sc = device_get_softc(dev);
	parent = device_get_parent(dev);
	sc->dev = dev;
	sysctl_ctx_init(&sc->sysctl_ctx);
	TAILQ_INIT(&sc->all_levels);
	CF_MTX_INIT(&sc->lock);
	sc->curr_level.total_set.freq = CPUFREQ_VAL_UNKNOWN;
	SLIST_INIT(&sc->saved_freq);
	sc->max_mhz = CPUFREQ_VAL_UNKNOWN;

	/*
	 * Only initialize one set of sysctls for all CPUs.  In the future,
	 * if multiple CPUs can have different settings, we can move these
	 * sysctls to be under every CPU instead of just the first one.
	 */
	numdevs = devclass_get_count(cpufreq_dc);
	if (numdevs > 1)
		return (0);

	CF_DEBUG("initializing one-time data for %s\n",
	    device_get_nameunit(dev));
	SYSCTL_ADD_PROC(&sc->sysctl_ctx,
	    SYSCTL_CHILDREN(device_get_sysctl_tree(parent)),
	    OID_AUTO, "freq", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
	    cpufreq_curr_sysctl, "I", "Current CPU frequency");
	SYSCTL_ADD_PROC(&sc->sysctl_ctx,
	    SYSCTL_CHILDREN(device_get_sysctl_tree(parent)),
	    OID_AUTO, "freq_levels", CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
	    cpufreq_levels_sysctl, "A", "CPU frequency levels");
	cf_ev_tag = EVENTHANDLER_REGISTER(cpufreq_changed, cpufreq_evaluate,
	    NULL, EVENTHANDLER_PRI_ANY);

	return (0);
}

static int
cpufreq_detach(device_t dev)
{
	struct cpufreq_softc *sc;
	struct cf_saved_freq *saved_freq;
	int numdevs;

	CF_DEBUG("shutdown %s\n", device_get_nameunit(dev));
	sc = device_get_softc(dev);
	sysctl_ctx_free(&sc->sysctl_ctx);

	while ((saved_freq = SLIST_FIRST(&sc->saved_freq)) != NULL) {
		SLIST_REMOVE_HEAD(&sc->saved_freq, link);
		free(saved_freq, M_TEMP);
	}

	/* Only clean up these resources when the last device is detaching. */
	numdevs = devclass_get_count(cpufreq_dc);
	if (numdevs == 1) {
		CF_DEBUG("final shutdown for %s\n", device_get_nameunit(dev));
		EVENTHANDLER_DEREGISTER(cpufreq_changed, cf_ev_tag);
	}

	return (0);
}

static void
cpufreq_evaluate(void *arg)
{
	/* TODO: Re-evaluate when notified of changes to drivers. */
}

static int
cf_set_method(device_t dev, const struct cf_level *level, int priority)
{
	struct cpufreq_softc *sc;
	const struct cf_setting *set;
	struct cf_saved_freq *saved_freq, *curr_freq;
	struct pcpu *pc;
	int cpu_id, error, i;
	static int once;

	sc = device_get_softc(dev);
	error = 0;
	set = NULL;
	saved_freq = NULL;

	/*
	 * Check that the TSC isn't being used as a timecounter.
	 * If it is, then return EBUSY and refuse to change the
	 * clock speed.
	 */
	if (strcmp(timecounter->tc_name, "TSC") == 0) {
		if (!once) {
			printf("cpufreq: frequency change with timecounter"
			    " TSC not allowed, see cpufreq(4)\n");
			once = 1;
		}
		return (EBUSY);
	}

	CF_MTX_LOCK(&sc->lock);

	/*
	 * If the requested level has a lower priority, don't allow
	 * the new level right now.
	 */
	if (priority < sc->curr_priority) {
		CF_DEBUG("ignoring, curr prio %d less than %d\n", priority,
		    sc->curr_priority);
		error = EPERM;
		goto out;
	}

	/*
	 * If the caller didn't specify a level and one is saved, prepare to
	 * restore the saved level.  If none has been saved, return an error.
	 */
	if (level == NULL) {
		saved_freq = SLIST_FIRST(&sc->saved_freq);
		if (saved_freq == NULL) {
			CF_DEBUG("NULL level, no saved level\n");
			error = ENXIO;
			goto out;
		}
		level = &saved_freq->level;
		priority = saved_freq->priority;
		CF_DEBUG("restoring saved level, freq %d prio %d\n",
		    level->total_set.freq, priority);
	}

	/* Reject levels that are below our specified threshold. */
	if (level->total_set.freq < cf_lowest_freq) {
		CF_DEBUG("rejecting freq %d, less than %d limit\n",
		    level->total_set.freq, cf_lowest_freq);
		error = EINVAL;
		goto out;
	}

	/* If already at this level, just return. */
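	/*
	 * CPUFREQ_CMP() allows a small tolerance when comparing frequencies,
	 * so values that differ only slightly (e.g. an estimated clock rate)
	 * are treated as the same level.
	 */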
	if (CPUFREQ_CMP(sc->curr_level.total_set.freq, level->total_set.freq)) {
		CF_DEBUG("skipping freq %d, same as current level %d\n",
		    level->total_set.freq, sc->curr_level.total_set.freq);
		goto skip;
	}

	/* First, set the absolute frequency via its driver. */
	set = &level->abs_set;
	if (set->dev) {
		if (!device_is_attached(set->dev)) {
			error = ENXIO;
			goto out;
		}

		/* Bind to the target CPU before switching, if necessary. */
		cpu_id = PCPU_GET(cpuid);
		pc = cpu_get_pcpu(set->dev);
		if (cpu_id != pc->pc_cpuid) {
			mtx_lock_spin(&sched_lock);
			sched_bind(curthread, pc->pc_cpuid);
			mtx_unlock_spin(&sched_lock);
		}
		CF_DEBUG("setting abs freq %d on %s (cpu %d)\n", set->freq,
		    device_get_nameunit(set->dev), PCPU_GET(cpuid));
		error = CPUFREQ_DRV_SET(set->dev, set);
		if (cpu_id != pc->pc_cpuid) {
			mtx_lock_spin(&sched_lock);
			sched_unbind(curthread);
			mtx_unlock_spin(&sched_lock);
		}
		if (error) {
			goto out;
		}
	}

	/* Next, set any/all relative frequencies via their drivers. */
	for (i = 0; i < level->rel_count; i++) {
		set = &level->rel_set[i];
		if (!device_is_attached(set->dev)) {
			error = ENXIO;
			goto out;
		}

		/* Bind to the target CPU before switching, if necessary. */
		cpu_id = PCPU_GET(cpuid);
		pc = cpu_get_pcpu(set->dev);
		if (cpu_id != pc->pc_cpuid) {
			mtx_lock_spin(&sched_lock);
			sched_bind(curthread, pc->pc_cpuid);
			mtx_unlock_spin(&sched_lock);
		}
		CF_DEBUG("setting rel freq %d on %s (cpu %d)\n", set->freq,
		    device_get_nameunit(set->dev), PCPU_GET(cpuid));
		error = CPUFREQ_DRV_SET(set->dev, set);
		if (cpu_id != pc->pc_cpuid) {
			mtx_lock_spin(&sched_lock);
			sched_unbind(curthread);
			mtx_unlock_spin(&sched_lock);
		}
		if (error) {
			/* XXX Back out any successful setting? */
			goto out;
		}
	}

skip:
	/*
	 * Before recording the current level, check if we're going to a
	 * higher priority.  If so, save the previous level and priority.
	 */
	if (sc->curr_level.total_set.freq != CPUFREQ_VAL_UNKNOWN &&
	    priority > sc->curr_priority) {
		CF_DEBUG("saving level, freq %d prio %d\n",
		    sc->curr_level.total_set.freq, sc->curr_priority);
		curr_freq = malloc(sizeof(*curr_freq), M_TEMP, M_NOWAIT);
		if (curr_freq == NULL) {
			error = ENOMEM;
			goto out;
		}
		curr_freq->level = sc->curr_level;
		curr_freq->priority = sc->curr_priority;
		SLIST_INSERT_HEAD(&sc->saved_freq, curr_freq, link);
	}
	sc->curr_level = *level;
	sc->curr_priority = priority;

	/* If we were restoring a saved state, reset it to "unused". */
	if (saved_freq != NULL) {
		CF_DEBUG("resetting saved level\n");
		sc->curr_level.total_set.freq = CPUFREQ_VAL_UNKNOWN;
		SLIST_REMOVE_HEAD(&sc->saved_freq, link);
		free(saved_freq, M_TEMP);
	}

out:
	CF_MTX_UNLOCK(&sc->lock);
	if (error && set)
		device_printf(set->dev, "set freq failed, err %d\n", error);
	return (error);
}

static int
cf_get_method(device_t dev, struct cf_level *level)
{
	struct cpufreq_softc *sc;
	struct cf_level *levels;
	struct cf_setting *curr_set, set;
	struct pcpu *pc;
	device_t *devs;
	int count, error, i, n, numdevs;
	uint64_t rate;

	sc = device_get_softc(dev);
	error = 0;
	levels = NULL;

	/* If we already know the current frequency, we're done. */
	CF_MTX_LOCK(&sc->lock);
	curr_set = &sc->curr_level.total_set;
	if (curr_set->freq != CPUFREQ_VAL_UNKNOWN) {
		CF_DEBUG("get returning known freq %d\n", curr_set->freq);
		goto out;
	}
	CF_MTX_UNLOCK(&sc->lock);

	/*
	 * We need to figure out the current level.  Loop through every
	 * driver, getting the current setting.  Then, attempt to get a best
	 * match of settings against each level.
	 */
	count = CF_MAX_LEVELS;
	levels = malloc(count * sizeof(*levels), M_TEMP, M_NOWAIT);
	if (levels == NULL)
		return (ENOMEM);
	error = CPUFREQ_LEVELS(sc->dev, levels, &count);
	if (error) {
		if (error == E2BIG)
			printf("cpufreq: need to increase CF_MAX_LEVELS\n");
		free(levels, M_TEMP);
		return (error);
	}
	error = device_get_children(device_get_parent(dev), &devs, &numdevs);
	if (error) {
		free(levels, M_TEMP);
		return (error);
	}

	/*
	 * Reacquire the lock and search for the given level.
	 *
	 * XXX Note: this is not quite right since we really need to go
	 * through each level and compare both absolute and relative
	 * settings for each driver in the system before making a match.
	 * The estimation code below catches this case though.
	 */
	CF_MTX_LOCK(&sc->lock);
	for (n = 0; n < numdevs && curr_set->freq == CPUFREQ_VAL_UNKNOWN; n++) {
		if (!device_is_attached(devs[n]))
			continue;
		error = CPUFREQ_DRV_GET(devs[n], &set);
		if (error)
			continue;
		for (i = 0; i < count; i++) {
			if (CPUFREQ_CMP(set.freq, levels[i].total_set.freq)) {
				sc->curr_level = levels[i];
				break;
			}
		}
	}
	free(devs, M_TEMP);
	if (curr_set->freq != CPUFREQ_VAL_UNKNOWN) {
		CF_DEBUG("get matched freq %d from drivers\n", curr_set->freq);
		goto out;
	}

	/*
	 * We couldn't find an exact match, so attempt to estimate and then
	 * match against a level.
	 */
	pc = cpu_get_pcpu(dev);
	if (pc == NULL) {
		error = ENXIO;
		goto out;
	}
	cpu_est_clockrate(pc->pc_cpuid, &rate);
	rate /= 1000000;
	for (i = 0; i < count; i++) {
		if (CPUFREQ_CMP(rate, levels[i].total_set.freq)) {
			sc->curr_level = levels[i];
			CF_DEBUG("get estimated freq %d\n", curr_set->freq);
			break;
		}
	}

out:
	if (error == 0)
		*level = sc->curr_level;

	CF_MTX_UNLOCK(&sc->lock);
	if (levels)
		free(levels, M_TEMP);
	return (error);
}

static int
cf_levels_method(device_t dev, struct cf_level *levels, int *count)
{
	struct cf_setting_array *set_arr;
	struct cf_setting_lst rel_sets;
	struct cpufreq_softc *sc;
	struct cf_level *lev;
	struct cf_setting *sets;
	struct pcpu *pc;
	device_t *devs;
	int error, i, numdevs, set_count, type;
	uint64_t rate;

	if (levels == NULL || count == NULL)
		return (EINVAL);

	TAILQ_INIT(&rel_sets);
	sc = device_get_softc(dev);
	error = device_get_children(device_get_parent(dev), &devs, &numdevs);
	if (error)
		return (error);
	sets = malloc(MAX_SETTINGS * sizeof(*sets), M_TEMP, M_NOWAIT);
	if (sets == NULL) {
		free(devs, M_TEMP);
		return (ENOMEM);
	}

	/* Get settings from all cpufreq drivers. */
	CF_MTX_LOCK(&sc->lock);
	for (i = 0; i < numdevs; i++) {
		/* Skip devices that aren't ready. */
		if (!device_is_attached(devs[i]))
			continue;

		/*
		 * Get settings, skipping drivers that offer no settings or
		 * provide settings for informational purposes only.
		 */
		error = CPUFREQ_DRV_TYPE(devs[i], &type);
		if (error || (type & CPUFREQ_FLAG_INFO_ONLY)) {
			if (error == 0) {
				CF_DEBUG("skipping info-only driver %s\n",
				    device_get_nameunit(devs[i]));
			}
			continue;
		}
		set_count = MAX_SETTINGS;
		error = CPUFREQ_DRV_SETTINGS(devs[i], sets, &set_count);
		if (error || set_count == 0)
			continue;

		/* Add the settings to our absolute/relative lists. */
		switch (type & CPUFREQ_TYPE_MASK) {
		case CPUFREQ_TYPE_ABSOLUTE:
			error = cpufreq_insert_abs(sc, sets, set_count);
			break;
		case CPUFREQ_TYPE_RELATIVE:
			CF_DEBUG("adding %d relative settings\n", set_count);
			set_arr = malloc(sizeof(*set_arr), M_TEMP, M_NOWAIT);
			if (set_arr == NULL) {
				error = ENOMEM;
				goto out;
			}
			bcopy(sets, set_arr->sets, set_count * sizeof(*sets));
			set_arr->count = set_count;
			TAILQ_INSERT_TAIL(&rel_sets, set_arr, link);
			break;
		default:
			error = EINVAL;
		}
		if (error)
			goto out;
	}

	/*
	 * If there are no absolute levels, create a fake one at 100%.  We
	 * then cache the clockrate for later use as our base frequency.
	 *
	 * XXX This assumes that the first time through, if we only have
	 * relative drivers, the CPU is currently running at 100%.
	 */
	if (TAILQ_EMPTY(&sc->all_levels)) {
		if (sc->max_mhz == CPUFREQ_VAL_UNKNOWN) {
			pc = cpu_get_pcpu(dev);
			cpu_est_clockrate(pc->pc_cpuid, &rate);
			sc->max_mhz = rate / 1000000;
		}
		memset(&sets[0], CPUFREQ_VAL_UNKNOWN, sizeof(*sets));
		sets[0].freq = sc->max_mhz;
		sets[0].dev = NULL;
		error = cpufreq_insert_abs(sc, sets, 1);
		if (error)
			goto out;
	}

	/* Create a combined list of absolute + relative levels. */
	TAILQ_FOREACH(set_arr, &rel_sets, link)
		cpufreq_expand_set(sc, set_arr);

	/* If the caller doesn't have enough space, return the actual count. */
	if (sc->all_count > *count) {
		*count = sc->all_count;
		error = E2BIG;
		goto out;
	}

	/* Finally, output the list of levels. */
	i = 0;
	TAILQ_FOREACH(lev, &sc->all_levels, link) {
		/* Skip levels that have a frequency that is too low. */
		if (lev->total_set.freq < cf_lowest_freq) {
			sc->all_count--;
			continue;
		}

		levels[i] = *lev;
		i++;
	}
	*count = sc->all_count;
	error = 0;

out:
	/* Clear all levels since we regenerate them each time. */
	while ((lev = TAILQ_FIRST(&sc->all_levels)) != NULL) {
		TAILQ_REMOVE(&sc->all_levels, lev, link);
		free(lev, M_TEMP);
	}
	sc->all_count = 0;

	CF_MTX_UNLOCK(&sc->lock);
	while ((set_arr = TAILQ_FIRST(&rel_sets)) != NULL) {
		TAILQ_REMOVE(&rel_sets, set_arr, link);
		free(set_arr, M_TEMP);
	}
	free(devs, M_TEMP);
	free(sets, M_TEMP);
	return (error);
}

/*
 * Create levels for an array of absolute settings and insert them in
 * sorted order in the specified list.
 */
static int
cpufreq_insert_abs(struct cpufreq_softc *sc, struct cf_setting *sets,
    int count)
{
	struct cf_level_lst *list;
	struct cf_level *level, *search;
	int i;

	CF_MTX_ASSERT(&sc->lock);

	list = &sc->all_levels;
	for (i = 0; i < count; i++) {
		level = malloc(sizeof(*level), M_TEMP, M_NOWAIT | M_ZERO);
		if (level == NULL)
			return (ENOMEM);
		level->abs_set = sets[i];
		level->total_set = sets[i];
		level->total_set.dev = NULL;
		sc->all_count++;

		if (TAILQ_EMPTY(list)) {
			CF_DEBUG("adding abs setting %d at head\n",
			    sets[i].freq);
			TAILQ_INSERT_HEAD(list, level, link);
			continue;
		}

		TAILQ_FOREACH_REVERSE(search, list, cf_level_lst, link) {
			if (sets[i].freq <= search->total_set.freq) {
				CF_DEBUG("adding abs setting %d after %d\n",
				    sets[i].freq, search->total_set.freq);
				TAILQ_INSERT_AFTER(list, search, level, link);
				break;
			}
		}

		/* Higher than every existing level, so it goes at the head. */
		if (search == NULL) {
			CF_DEBUG("adding abs setting %d at head\n",
			    sets[i].freq);
			TAILQ_INSERT_HEAD(list, level, link);
		}
	}
	return (0);
}

/*
 * Expand a group of relative settings, creating derived levels from them.
 */
static int
cpufreq_expand_set(struct cpufreq_softc *sc, struct cf_setting_array *set_arr)
{
	struct cf_level *fill, *search;
	struct cf_setting *set;
	int i;

	CF_MTX_ASSERT(&sc->lock);

	/*
	 * Walk the set of all existing levels in reverse.  This is so we
	 * create derived states from the lowest absolute settings first
	 * and discard duplicates created from higher absolute settings.
	 * For instance, a level of 50 MHz derived from 100 MHz + 50% is
	 * preferable to 200 MHz + 25% because absolute settings are more
	 * efficient since they often change the voltage as well.
	 */
	TAILQ_FOREACH_REVERSE(search, &sc->all_levels, cf_level_lst, link) {
		/* Add each setting to the level, duplicating if necessary. */
		for (i = 0; i < set_arr->count; i++) {
			set = &set_arr->sets[i];

			/*
			 * If this setting is less than 100%, split the level
			 * into two and add this setting to the new level.
			 */
			fill = search;
			if (set->freq < 10000) {
				fill = cpufreq_dup_set(sc, search, set);

				/*
				 * The new level was a duplicate of an existing
				 * level or its absolute setting is too high
				 * so we freed it.  For example, we discard a
				 * derived level of 1000 MHz/25% if a level
				 * of 500 MHz/100% already exists.
				 */
				if (fill == NULL)
					break;
			}

			/* Add this setting to the existing or new level. */
			KASSERT(fill->rel_count < MAX_SETTINGS,
			    ("cpufreq: too many relative drivers (%d)",
			    MAX_SETTINGS));
			fill->rel_set[fill->rel_count] = *set;
			fill->rel_count++;
			CF_DEBUG(
			"expand set added rel setting %d%% to %d level\n",
			    set->freq / 100, fill->total_set.freq);
		}
	}

	return (0);
}

static struct cf_level *
cpufreq_dup_set(struct cpufreq_softc *sc, struct cf_level *dup,
    struct cf_setting *set)
{
	struct cf_level_lst *list;
	struct cf_level *fill, *itr;
	struct cf_setting *fill_set, *itr_set;
	int i;

	CF_MTX_ASSERT(&sc->lock);

	/*
	 * Create a new level, copy it from the old one, and update the
	 * total frequency and power by the percentage specified in the
	 * relative setting.
	 */
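	/*
	 * Relative settings express their "frequency" in hundredths of a
	 * percent (10000 == 100.00%), which is why the derived frequency
	 * and power below are scaled by set->freq / 10000.
	 */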
	fill = malloc(sizeof(*fill), M_TEMP, M_NOWAIT);
	if (fill == NULL)
		return (NULL);
	*fill = *dup;
	fill_set = &fill->total_set;
	fill_set->freq = ((uint64_t)fill_set->freq * set->freq) / 10000;
	if (fill_set->power != CPUFREQ_VAL_UNKNOWN) {
		fill_set->power = ((uint64_t)fill_set->power * set->freq)
		    / 10000;
	}
	if (set->lat != CPUFREQ_VAL_UNKNOWN) {
		if (fill_set->lat != CPUFREQ_VAL_UNKNOWN)
			fill_set->lat += set->lat;
		else
			fill_set->lat = set->lat;
	}
	CF_DEBUG("dup set considering derived setting %d\n", fill_set->freq);

	/*
	 * If we copied an old level that we already modified (say, at 100%),
	 * we need to remove that setting before adding this one.  Since we
	 * process each setting array in order, we know any settings for this
	 * driver will be found at the end.
	 */
	for (i = fill->rel_count; i != 0; i--) {
		if (fill->rel_set[i - 1].dev != set->dev)
			break;
		CF_DEBUG("removed last relative driver: %s\n",
		    device_get_nameunit(set->dev));
		fill->rel_count--;
	}

	/*
	 * Insert the new level in sorted order.  If it is a duplicate of an
	 * existing level (1) or has an absolute setting higher than the
	 * existing level (2), do not add it.  We can do this since any such
	 * level is guaranteed to use less power.  For example (1), a level
	 * with one absolute setting of 800 MHz uses less power than one
	 * composed of an absolute setting of 1600 MHz and a relative setting
	 * at 50%.  Also for example (2), a level of 800 MHz/75% is preferable
	 * to 1600 MHz/25% even though the latter has a lower total frequency.
	 */
	list = &sc->all_levels;
	KASSERT(!TAILQ_EMPTY(list), ("all levels list empty in dup set"));
	TAILQ_FOREACH_REVERSE(itr, list, cf_level_lst, link) {
		itr_set = &itr->total_set;
		if (CPUFREQ_CMP(fill_set->freq, itr_set->freq)) {
			CF_DEBUG("dup set rejecting %d (dupe)\n",
			    fill_set->freq);
			itr = NULL;
			break;
		} else if (fill_set->freq < itr_set->freq) {
			if (fill->abs_set.freq <= itr->abs_set.freq) {
				CF_DEBUG(
			"dup done, inserting new level %d after %d\n",
				    fill_set->freq, itr_set->freq);
				TAILQ_INSERT_AFTER(list, itr, fill, link);
				sc->all_count++;
			} else {
				CF_DEBUG("dup set rejecting %d (abs too big)\n",
				    fill_set->freq);
				itr = NULL;
			}
			break;
		}
	}

	/* We didn't find a good place for this new level so free it. */
	if (itr == NULL) {
		CF_DEBUG("dup set freeing new level %d (not optimal)\n",
		    fill_set->freq);
		free(fill, M_TEMP);
		fill = NULL;
	}

	return (fill);
}

static int
cpufreq_curr_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct cpufreq_softc *sc;
	struct cf_level *levels;
	int count, devcount, error, freq, i, n;
	device_t *devs;

	devs = NULL;
	sc = oidp->oid_arg1;
	levels = malloc(CF_MAX_LEVELS * sizeof(*levels), M_TEMP, M_NOWAIT);
	if (levels == NULL)
		return (ENOMEM);

	error = CPUFREQ_GET(sc->dev, &levels[0]);
	if (error)
		goto out;
	freq = levels[0].total_set.freq;
	error = sysctl_handle_int(oidp, &freq, 0, req);
	if (error != 0 || req->newptr == NULL)
		goto out;

	/*
	 * While we only call cpufreq_get() on one device (assuming all
	 * CPUs have equal levels), we call cpufreq_set() on all CPUs.
	 * This is needed for some MP systems.
	 */
	error = devclass_get_devices(cpufreq_dc, &devs, &devcount);
	if (error)
		goto out;
	for (n = 0; n < devcount; n++) {
		count = CF_MAX_LEVELS;
		error = CPUFREQ_LEVELS(devs[n], levels, &count);
		if (error) {
			if (error == E2BIG)
				printf(
				"cpufreq: need to increase CF_MAX_LEVELS\n");
			break;
		}
		for (i = 0; i < count; i++) {
			if (CPUFREQ_CMP(levels[i].total_set.freq, freq)) {
				error = CPUFREQ_SET(devs[n], &levels[i],
				    CPUFREQ_PRIO_USER);
				break;
			}
		}
		if (i == count) {
			error = EINVAL;
			break;
		}
	}

out:
	if (devs)
		free(devs, M_TEMP);
	if (levels)
		free(levels, M_TEMP);
	return (error);
}

static int
cpufreq_levels_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct cpufreq_softc *sc;
	struct cf_level *levels;
	struct cf_setting *set;
	struct sbuf sb;
	int count, error, i;

	sc = oidp->oid_arg1;
	sbuf_new(&sb, NULL, 128, SBUF_AUTOEXTEND);

	/* Get settings from the device and generate the output string. */
	count = CF_MAX_LEVELS;
	levels = malloc(count * sizeof(*levels), M_TEMP, M_NOWAIT);
	if (levels == NULL) {
		sbuf_delete(&sb);
		return (ENOMEM);
	}
	error = CPUFREQ_LEVELS(sc->dev, levels, &count);
	if (error) {
		if (error == E2BIG)
			printf("cpufreq: need to increase CF_MAX_LEVELS\n");
		goto out;
	}
	if (count) {
		for (i = 0; i < count; i++) {
			set = &levels[i].total_set;
			sbuf_printf(&sb, "%d/%d ", set->freq, set->power);
		}
	} else
		sbuf_cpy(&sb, "0");
	sbuf_trim(&sb);
	sbuf_finish(&sb);
	error = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);

out:
	free(levels, M_TEMP);
	sbuf_delete(&sb);
	return (error);
}

static int
cpufreq_settings_sysctl(SYSCTL_HANDLER_ARGS)
{
	device_t dev;
	struct cf_setting *sets;
	struct sbuf sb;
	int error, i, set_count;

	dev = oidp->oid_arg1;
	sbuf_new(&sb, NULL, 128, SBUF_AUTOEXTEND);

	/* Get settings from the device and generate the output string. */
	set_count = MAX_SETTINGS;
	sets = malloc(set_count * sizeof(*sets), M_TEMP, M_NOWAIT);
	if (sets == NULL) {
		sbuf_delete(&sb);
		return (ENOMEM);
	}
	error = CPUFREQ_DRV_SETTINGS(dev, sets, &set_count);
	if (error)
		goto out;
	if (set_count) {
		for (i = 0; i < set_count; i++)
			sbuf_printf(&sb, "%d/%d ", sets[i].freq, sets[i].power);
	} else
		sbuf_cpy(&sb, "0");
	sbuf_trim(&sb);
	sbuf_finish(&sb);
	error = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);

out:
	free(sets, M_TEMP);
	sbuf_delete(&sb);
	return (error);
}

int
cpufreq_register(device_t dev)
{
	struct cpufreq_softc *sc;
	device_t cf_dev, cpu_dev;

	/* Add a sysctl to get each driver's settings separately. */
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "freq_settings", CTLTYPE_STRING | CTLFLAG_RD, dev, 0,
	    cpufreq_settings_sysctl, "A", "CPU frequency driver settings");

	/*
	 * Add only one cpufreq device to each CPU.  Currently, all CPUs
	 * must offer the same levels and be switched at the same time.
	 */
	cpu_dev = device_get_parent(dev);
	if ((cf_dev = device_find_child(cpu_dev, "cpufreq", -1))) {
		sc = device_get_softc(cf_dev);
		sc->max_mhz = CPUFREQ_VAL_UNKNOWN;
		return (0);
	}

	/* Add the child device and possibly sysctls. */
	cf_dev = BUS_ADD_CHILD(cpu_dev, 0, "cpufreq", -1);
	if (cf_dev == NULL)
		return (ENOMEM);
	device_quiet(cf_dev);

	return (device_probe_and_attach(cf_dev));
}

int
cpufreq_unregister(device_t dev)
{
	device_t cf_dev, *devs;
	int cfcount, devcount, error, i, type;

	/*
	 * If this is the last cpufreq child device, remove the control
	 * device as well.  We identify cpufreq children by calling a method
	 * they support.
	 */
	error = device_get_children(device_get_parent(dev), &devs, &devcount);
	if (error)
		return (error);
	cf_dev = device_find_child(device_get_parent(dev), "cpufreq", -1);
	if (cf_dev == NULL) {
		device_printf(dev,
		    "warning: cpufreq_unregister called with no cpufreq device active\n");
		free(devs, M_TEMP);
		return (0);
	}
	cfcount = 0;
	for (i = 0; i < devcount; i++) {
		if (!device_is_attached(devs[i]))
			continue;
		if (CPUFREQ_DRV_TYPE(devs[i], &type) == 0)
			cfcount++;
	}
	if (cfcount <= 1)
		device_delete_child(device_get_parent(cf_dev), cf_dev);
	free(devs, M_TEMP);

	return (0);
}