// SPDX-License-Identifier: GPL-2.0-only
/*
 * menu.c - the menu idle governor
 *
 * Copyright (C) 2006-2007 Adam Belay <abelay@novell.com>
 * Copyright (C) 2009 Intel Corporation
 * Author:
 *        Arjan van de Ven <arjan@linux.intel.com>
 */

#include <linux/kernel.h>
#include <linux/cpuidle.h>
#include <linux/time.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/sched/stat.h>
#include <linux/math64.h>

#include "gov.h"

#define BUCKETS 12
#define INTERVAL_SHIFT 3
#define INTERVALS (1UL << INTERVAL_SHIFT)
#define RESOLUTION 1024
#define DECAY 8
#define MAX_INTERESTING (50000 * NSEC_PER_USEC)

/*
 * Concepts and ideas behind the menu governor
 *
 * For the menu governor, there are 3 decision factors for picking a C
 * state:
 * 1) Energy break even point
 * 2) Performance impact
 * 3) Latency tolerance (from the PM QoS infrastructure)
 * These three factors are treated independently.
 *
 * Energy break even point
 * -----------------------
 * C state entry and exit have an energy cost, and a certain amount of time in
 * the C state is required to actually break even on this cost. CPUIDLE
 * provides us this duration in the "target_residency" field. So all that we
 * need is a good prediction of how long we'll be idle. Like the traditional
 * menu governor, we start with the actual known "next timer event" time.
 *
 * Since there are other sources of wakeups (interrupts, for example) besides
 * the next timer event, this estimation is rather optimistic. To get a
 * more realistic estimate, a correction factor based on historic behavior
 * is applied to the estimate. For example, if in the past the actual
 * duration always was 50% of the next timer tick, the correction factor will
 * be 0.5.
 *
 * menu uses a running average for this correction factor; however, it uses a
 * set of factors, not just a single factor. This stems from the realization
 * that the ratio is dependent on the order of magnitude of the expected
 * duration; if we expect 500 milliseconds of idle time, the likelihood of
 * getting an interrupt very early is much higher than if we expect 50
 * microseconds of idle time. A second independent factor that has a big
 * impact on the actual ratio is whether there is (disk) IO outstanding or
 * not.
 * (as a special twist, we consider every sleep longer than 50 milliseconds
 * as perfect; there are no power gains for sleeping longer than this)
 *
 * For these two reasons we keep an array of 12 independent factors, which is
 * indexed based on the magnitude of the expected duration as well as the
 * "is IO outstanding" property.
 *
 * Repeatable-interval-detector
 * ----------------------------
 * There are some cases where "next timer" is a completely unusable predictor:
 * those cases where the interval is fixed, for example due to hardware
 * interrupt mitigation, but also due to fixed transfer rate devices such as
 * mice.
 * For this, we use a different predictor: we track the duration of the last 8
 * intervals and, if the standard deviation of these 8 intervals is below a
 * threshold value, we use the average of these intervals as the prediction.
 *
 * Limiting Performance Impact
 * ---------------------------
 * C states, especially those with large exit latencies, can have a real
 * noticeable impact on workloads, which is not acceptable for most sysadmins,
 * and in addition, less performance has a power price of its own.
 *
 * As a general rule of thumb, menu assumes that the following heuristic
 * holds:
 *     The busier the system, the less impact of C states is acceptable
 *
 * This rule-of-thumb is implemented using a performance-multiplier:
 * If the exit latency times the performance multiplier is longer than
 * the predicted duration, the C state is not considered a candidate
 * for selection due to a too high performance impact. So the higher
 * this multiplier is, the longer we need to be idle to pick a deep C
 * state, and thus the less likely a busy CPU will hit such a deep
 * C state.
 *
 * Currently there is only one value determining the factor:
 * 10 points are added for each process that is waiting for IO on this CPU.
 * (This value was experimentally determined.)
 * Utilization is no longer a factor as it was shown that it never contributed
 * significantly to the performance multiplier in the first place.
 */
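/*
 * Worked example of the correction factor (values are illustrative only):
 * the per-bucket factors below are stored scaled by RESOLUTION * DECAY =
 * 8192, so 8192 represents unity, i.e. "idle periods last as long as the
 * next timer event predicts". If the next timer event is 100 us away and
 * the current bucket's factor is 4096 (i.e. 0.5), menu_select() computes
 *
 *     100 us * 4096 / 8192 = 50 us
 *
 * (ignoring the half-microsecond round-up term in the division) as the
 * corrected idle duration estimate.
 */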
struct menu_device {
        int             needs_update;
        int             tick_wakeup;

        u64             next_timer_ns;
        unsigned int    bucket;
        unsigned int    correction_factor[BUCKETS];
        unsigned int    intervals[INTERVALS];
        int             interval_ptr;
};

static inline int which_bucket(u64 duration_ns, unsigned int nr_iowaiters)
{
        int bucket = 0;

        /*
         * We keep two groups of stats; one with no
         * IO pending, one with IO pending.
         * This allows us to calculate
         * E(duration)|iowait
         */
        if (nr_iowaiters)
                bucket = BUCKETS/2;

        if (duration_ns < 10ULL * NSEC_PER_USEC)
                return bucket;
        if (duration_ns < 100ULL * NSEC_PER_USEC)
                return bucket + 1;
        if (duration_ns < 1000ULL * NSEC_PER_USEC)
                return bucket + 2;
        if (duration_ns < 10000ULL * NSEC_PER_USEC)
                return bucket + 3;
        if (duration_ns < 100000ULL * NSEC_PER_USEC)
                return bucket + 4;
        return bucket + 5;
}
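/*
 * Example of the bucket mapping above (illustrative values): an expected
 * idle duration of 250 us falls in the "< 1000 us" range, so which_bucket()
 * returns 2 when no tasks are waiting for IO, and BUCKETS/2 + 2 = 8 when
 * at least one task is.
 */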
/*
 * Return a multiplier for the exit latency that is intended
 * to take performance requirements into account.
 * The more performance critical we estimate the system
 * to be, the higher this multiplier, and thus the higher
 * the barrier to go to an expensive C state.
 */
static inline int performance_multiplier(unsigned int nr_iowaiters)
{
        /* for IO wait tasks (per cpu!) we add 10x each */
        return 1 + 10 * nr_iowaiters;
}

static DEFINE_PER_CPU(struct menu_device, menu_devices);

static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev);

/*
 * Try detecting repeating patterns by keeping track of the last 8
 * intervals, and checking if the standard deviation of that set
 * of points is below a threshold. If it is... then use the
 * average of these 8 points as the estimated value.
 */
static unsigned int get_typical_interval(struct menu_device *data)
{
        int i, divisor;
        unsigned int min, max, thresh, avg;
        uint64_t sum, variance;

        thresh = INT_MAX; /* Discard outliers above this value */

again:

        /* First calculate the average of past intervals */
        min = UINT_MAX;
        max = 0;
        sum = 0;
        divisor = 0;
        for (i = 0; i < INTERVALS; i++) {
                unsigned int value = data->intervals[i];
                if (value <= thresh) {
                        sum += value;
                        divisor++;
                        if (value > max)
                                max = value;

                        if (value < min)
                                min = value;
                }
        }

        if (!max)
                return UINT_MAX;

        if (divisor == INTERVALS)
                avg = sum >> INTERVAL_SHIFT;
        else
                avg = div_u64(sum, divisor);

        /* Then try to determine variance */
        variance = 0;
        for (i = 0; i < INTERVALS; i++) {
                unsigned int value = data->intervals[i];
                if (value <= thresh) {
                        int64_t diff = (int64_t)value - avg;
                        variance += diff * diff;
                }
        }
        if (divisor == INTERVALS)
                variance >>= INTERVAL_SHIFT;
        else
                do_div(variance, divisor);

        /*
         * The typical interval is obtained when the standard deviation is
         * small (stddev <= 20 us, variance <= 400 us^2) or the standard
         * deviation is small compared to the average interval (avg >
         * 6*stddev, avg^2 > 36*variance). The average is smaller than
         * UINT_MAX aka U32_MAX, so computing its square does not
         * overflow a u64. We simply reject this candidate average if
         * the standard deviation is greater than 715 s (which is
         * rather unlikely).
         *
         * Use this result only if there is no timer to wake us up sooner.
         */
        if (likely(variance <= U64_MAX/36)) {
                if ((((u64)avg*avg > variance*36) && (divisor * 4 >= INTERVALS * 3))
                    || variance <= 400) {
                        return avg;
                }
        }

        /*
         * If we have outliers to the upside in our distribution, discard
         * those by setting the threshold to exclude these outliers, then
         * calculate the average and standard deviation again. Once we get
         * down to the bottom 3/4 of our samples, stop excluding samples.
         *
         * This can deal with workloads that have long pauses interspersed
         * with sporadic activity with a bunch of short pauses.
         */
        if ((divisor * 4) <= INTERVALS * 3)
                return UINT_MAX;

        thresh = max - 1;
        goto again;
}
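/*
 * Worked example of the detector above (illustrative sample values): for
 * recorded intervals { 100, 100, 100, 100, 100, 100, 100, 10000 } us, the
 * first pass yields avg = 1337 us with a huge variance, so the candidate
 * is rejected and the threshold drops to 9999 us. The second pass, over
 * the remaining 7 samples (still more than 3/4 of INTERVALS), yields
 * avg = 100 us with zero variance, which is accepted and returned.
 */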
/**
 * menu_select - selects the next idle state to enter
 * @drv: cpuidle driver containing state data
 * @dev: the CPU
 * @stop_tick: indication of whether or not to stop the tick
 */
static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
                       bool *stop_tick)
{
        struct menu_device *data = this_cpu_ptr(&menu_devices);
        s64 latency_req = cpuidle_governor_latency_req(dev->cpu);
        u64 predicted_ns;
        u64 interactivity_req;
        unsigned int nr_iowaiters;
        ktime_t delta, delta_tick;
        int i, idx;

        if (data->needs_update) {
                menu_update(drv, dev);
                data->needs_update = 0;
        }

        nr_iowaiters = nr_iowait_cpu(dev->cpu);

        /* Find the shortest expected idle interval. */
        predicted_ns = get_typical_interval(data) * NSEC_PER_USEC;
        if (predicted_ns > RESIDENCY_THRESHOLD_NS) {
                unsigned int timer_us;

                /* Determine the time till the closest timer. */
                delta = tick_nohz_get_sleep_length(&delta_tick);
                if (unlikely(delta < 0)) {
                        delta = 0;
                        delta_tick = 0;
                }

                data->next_timer_ns = delta;
                data->bucket = which_bucket(data->next_timer_ns, nr_iowaiters);

                /* Round up the result for half microseconds. */
                timer_us = div_u64((RESOLUTION * DECAY * NSEC_PER_USEC) / 2 +
                                        data->next_timer_ns *
                                                data->correction_factor[data->bucket],
                                   RESOLUTION * DECAY * NSEC_PER_USEC);
                /* Use the lowest expected idle interval to pick the idle state. */
                predicted_ns = min((u64)timer_us * NSEC_PER_USEC, predicted_ns);
        } else {
                /*
                 * Because the next timer event is not going to be determined
                 * in this case, assume that without the tick the closest timer
                 * will be in the distant future and that the closest tick will
                 * occur after 1/2 of the tick period.
                 */
                data->next_timer_ns = KTIME_MAX;
                delta_tick = TICK_NSEC / 2;
                data->bucket = which_bucket(KTIME_MAX, nr_iowaiters);
        }

        if (unlikely(drv->state_count <= 1 || latency_req == 0) ||
            ((data->next_timer_ns < drv->states[1].target_residency_ns ||
              latency_req < drv->states[1].exit_latency_ns) &&
             !dev->states_usage[0].disable)) {
                /*
                 * In this case state[0] will be used no matter what, so return
                 * it right away and keep the tick running if state[0] is a
                 * polling one.
                 */
                *stop_tick = !(drv->states[0].flags & CPUIDLE_FLAG_POLLING);
                return 0;
        }

        if (tick_nohz_tick_stopped()) {
                /*
                 * If the tick is already stopped, the cost of possible short
                 * idle duration misprediction is much higher, because the CPU
                 * may be stuck in a shallow idle state for a long time as a
                 * result of it. In that case say we might mispredict and use
                 * the known time till the closest timer event for the idle
                 * state selection.
                 */
                if (predicted_ns < TICK_NSEC)
                        predicted_ns = data->next_timer_ns;
        } else {
                /*
                 * Use the performance multiplier and the user-configurable
                 * latency_req to determine the maximum exit latency.
                 */
                interactivity_req = div64_u64(predicted_ns,
                                              performance_multiplier(nr_iowaiters));
                if (latency_req > interactivity_req)
                        latency_req = interactivity_req;
        }

        /*
         * Find the idle state with the lowest power while satisfying
         * our constraints.
         */
        idx = -1;
        for (i = 0; i < drv->state_count; i++) {
                struct cpuidle_state *s = &drv->states[i];

                if (dev->states_usage[i].disable)
                        continue;

                if (idx == -1)
                        idx = i; /* first enabled state */

                if (s->target_residency_ns > predicted_ns) {
                        /*
                         * Use a physical idle state, not busy polling, unless
                         * a timer is going to trigger soon enough.
                         */
                        if ((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) &&
                            s->exit_latency_ns <= latency_req &&
                            s->target_residency_ns <= data->next_timer_ns) {
                                predicted_ns = s->target_residency_ns;
                                idx = i;
                                break;
                        }
                        if (predicted_ns < TICK_NSEC)
                                break;

                        if (!tick_nohz_tick_stopped()) {
                                /*
                                 * If the state selected so far is shallow,
                                 * waking up early won't hurt, so retain the
                                 * tick in that case and let the governor run
                                 * again in the next iteration of the loop.
                                 */
                                predicted_ns = drv->states[idx].target_residency_ns;
                                break;
                        }

                        /*
                         * If the state selected so far is shallow and this
                         * state's target residency matches the time till the
                         * closest timer event, select this one to avoid getting
                         * stuck in the shallow one for too long.
                         */
                        if (drv->states[idx].target_residency_ns < TICK_NSEC &&
                            s->target_residency_ns <= delta_tick)
                                idx = i;

                        return idx;
                }
                if (s->exit_latency_ns > latency_req)
                        break;

                idx = i;
        }

        if (idx == -1)
                idx = 0; /* No states enabled. Must use 0. */

        /*
         * Don't stop the tick if the selected state is a polling one or if the
         * expected idle duration is shorter than the tick period length.
         */
        if (((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) ||
             predicted_ns < TICK_NSEC) && !tick_nohz_tick_stopped()) {
                *stop_tick = false;

                if (idx > 0 && drv->states[idx].target_residency_ns > delta_tick) {
                        /*
                         * The tick is not going to be stopped and the target
                         * residency of the state to be returned is not within
                         * the time until the next timer event including the
                         * tick, so try to correct that.
                         */
                        for (i = idx - 1; i >= 0; i--) {
                                if (dev->states_usage[i].disable)
                                        continue;

                                idx = i;
                                if (drv->states[i].target_residency_ns <= delta_tick)
                                        break;
                        }
                }
        }

        return idx;
}
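/*
 * Worked example of the latency clamp in menu_select() above (values are
 * illustrative): while the tick is still running, a predicted idle
 * duration of 210 us with 2 tasks in IO wait gives a performance
 * multiplier of 1 + 10 * 2 = 21, so the exit latency of the chosen state
 * is capped at 210 us / 21 = 10 us, unless the PM QoS limit is already
 * lower than that.
 */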
/**
 * menu_reflect - records that data structures need update
 * @dev: the CPU
 * @index: the index of actual entered state
 *
 * NOTE: it's important to be fast here because this operation will add to
 * the overall exit latency.
 */
static void menu_reflect(struct cpuidle_device *dev, int index)
{
        struct menu_device *data = this_cpu_ptr(&menu_devices);

        dev->last_state_idx = index;
        data->needs_update = 1;
        data->tick_wakeup = tick_nohz_idle_got_tick();
}

/**
 * menu_update - attempts to guess what happened after entry
 * @drv: cpuidle driver containing state data
 * @dev: the CPU
 */
static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
        struct menu_device *data = this_cpu_ptr(&menu_devices);
        int last_idx = dev->last_state_idx;
        struct cpuidle_state *target = &drv->states[last_idx];
        u64 measured_ns;
        unsigned int new_factor;

        /*
         * Try to figure out how much time passed between entry to low
         * power state and occurrence of the wakeup event.
         *
         * If the entered idle state didn't support residency measurements,
         * we use them anyway if they are short, and if long,
         * truncate to the whole expected time.
         *
         * Any measured amount of time will include the exit latency.
         * Since we are interested in when the wakeup began, not when it
         * was completed, we must subtract the exit latency. However, if
         * the measured amount of time is less than the exit latency,
         * assume the state was never reached and the exit latency is 0.
         */

        if (data->tick_wakeup && data->next_timer_ns > TICK_NSEC) {
                /*
                 * The nohz code said that there wouldn't be any events within
                 * the tick boundary (if the tick was stopped), but the idle
                 * duration predictor had a differing opinion. Since the CPU
                 * was woken up by a tick (that wasn't stopped after all), the
                 * predictor was not quite right, so assume that the CPU could
                 * have been idle long (but not forever) to help the idle
                 * duration predictor do a better job next time.
                 */
                measured_ns = 9 * MAX_INTERESTING / 10;
        } else if ((drv->states[last_idx].flags & CPUIDLE_FLAG_POLLING) &&
                   dev->poll_time_limit) {
                /*
                 * The CPU exited the "polling" state due to a time limit, so
                 * the idle duration prediction leading to the selection of that
                 * state was inaccurate. If a better prediction had been made,
                 * the CPU might have been woken up from idle by the next timer.
                 * Assume that to be the case.
                 */
                measured_ns = data->next_timer_ns;
        } else {
                /* measured value */
                measured_ns = dev->last_residency_ns;

                /* Deduct exit latency */
                if (measured_ns > 2 * target->exit_latency_ns)
                        measured_ns -= target->exit_latency_ns;
                else
                        measured_ns /= 2;
        }

        /* Make sure our coefficients do not exceed unity */
        if (measured_ns > data->next_timer_ns)
                measured_ns = data->next_timer_ns;

        /* Update our correction ratio */
        new_factor = data->correction_factor[data->bucket];
        new_factor -= new_factor / DECAY;

        if (data->next_timer_ns > 0 && measured_ns < MAX_INTERESTING)
                new_factor += div64_u64(RESOLUTION * measured_ns,
                                        data->next_timer_ns);
        else
                /*
                 * we were idle so long that we count it as a perfect
                 * prediction
                 */
                new_factor += RESOLUTION;

        /*
         * We don't want 0 as factor; we always want at least
         * a tiny bit of estimated time. Fortunately, due to rounding,
         * new_factor will stay nonzero regardless of measured_ns values
         * and the compiler can eliminate this test as long as DECAY > 1.
         */
        if (DECAY == 1 && unlikely(new_factor == 0))
                new_factor = 1;

        data->correction_factor[data->bucket] = new_factor;

        /* update the repeating-pattern data */
        data->intervals[data->interval_ptr++] = ktime_to_us(measured_ns);
        if (data->interval_ptr >= INTERVALS)
                data->interval_ptr = 0;
}
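/*
 * Worked example of the factor update in menu_update() above (values are
 * illustrative): with DECAY = 8, an old factor of 8192 first decays to
 * 8192 - 8192/8 = 7168. If the measured idle time then turns out to be
 * half the next-timer distance, RESOLUTION * measured_ns / next_timer_ns
 * contributes 512, giving 7680, i.e. roughly 0.94 on the 8192-is-unity
 * fixed-point scale.
 */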
/**
 * menu_enable_device - scans a CPU's states and does setup
 * @drv: cpuidle driver
 * @dev: the CPU
 */
static int menu_enable_device(struct cpuidle_driver *drv,
                                struct cpuidle_device *dev)
{
        struct menu_device *data = &per_cpu(menu_devices, dev->cpu);
        int i;

        memset(data, 0, sizeof(struct menu_device));

        /*
         * if the correction factor is 0 (e.g. first time init or cpu hotplug
         * etc), we actually want to start out with a unity factor.
         */
        for (i = 0; i < BUCKETS; i++)
                data->correction_factor[i] = RESOLUTION * DECAY;

        return 0;
}

static struct cpuidle_governor menu_governor = {
        .name =         "menu",
        .rating =       20,
        .enable =       menu_enable_device,
        .select =       menu_select,
        .reflect =      menu_reflect,
};

/**
 * init_menu - initializes the governor
 */
static int __init init_menu(void)
{
        return cpuidle_register_governor(&menu_governor);
}

postcore_initcall(init_menu);
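/*
 * Usage note: on kernels that expose a writable cpuidle sysfs attribute,
 * the active governor can be inspected and switched at runtime, e.g.:
 *
 *     cat /sys/devices/system/cpu/cpuidle/current_governor
 *     echo menu > /sys/devices/system/cpu/cpuidle/current_governor
 */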