/*
 * menu.c - the menu idle governor
 *
 * Copyright (C) 2006-2007 Adam Belay <abelay@novell.com>
 * Copyright (C) 2009 Intel Corporation
 * Author:
 *        Arjan van de Ven <arjan@linux.intel.com>
 *
 * This code is licensed under the GPL version 2 as described
 * in the COPYING file that accompanies the Linux Kernel.
 */

#include <linux/kernel.h>
#include <linux/cpuidle.h>
#include <linux/pm_qos.h>
#include <linux/time.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/sched.h>
#include <linux/math64.h>
#include <linux/module.h>

/*
 * Please note when changing the tuning values:
 * If (MAX_INTERESTING-1) * RESOLUTION > UINT_MAX, the result of
 * a scaling operation multiplication may overflow on 32 bit platforms.
 * In that case, #define RESOLUTION as ULL to get 64 bit result:
 * #define RESOLUTION 1024ULL
 *
 * The default values do not overflow.
 */
#define BUCKETS 12
#define INTERVALS 8
#define RESOLUTION 1024
#define DECAY 8
#define MAX_INTERESTING 50000
#define STDDEV_THRESH 400
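
/*
 * With the default values, (MAX_INTERESTING - 1) * RESOLUTION is
 * 49999 * 1024 == 51198976, well below UINT_MAX (4294967295), so the
 * scaling multiplication stays within 32 bits.
 */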


/*
 * Concepts and ideas behind the menu governor
 *
 * For the menu governor, there are 3 decision factors for picking a C
 * state:
 * 1) Energy break even point
 * 2) Performance impact
 * 3) Latency tolerance (from pmqos infrastructure)
 * These three factors are treated independently.
 *
 * Energy break even point
 * -----------------------
 * C state entry and exit have an energy cost, and a certain amount of time in
 * the C state is required to actually break even on this cost. CPUIDLE
 * provides us this duration in the "target_residency" field. So all that we
 * need is a good prediction of how long we'll be idle. Like the traditional
 * menu governor, we start with the actual known "next timer event" time.
 *
 * Since there are other sources of wakeups (interrupts for example) than
 * the next timer event, this estimation is rather optimistic. To get a
 * more realistic estimate, a correction factor is applied to the estimate,
 * that is based on historic behavior. For example, if in the past the actual
 * duration always was 50% of the next timer tick, the correction factor will
 * be 0.5.
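 * (In the implementation the factor is kept in fixed point, scaled by
 * RESOLUTION * DECAY == 8192; a factor of 0.5 is stored as 4096, and a
 * 1000 usec timer estimate is corrected to 1000 * 4096 / 8192 == 500.)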
 *
 * menu uses a running average for this correction factor, however it uses a
 * set of factors, not just a single factor. This stems from the realization
 * that the ratio is dependent on the order of magnitude of the expected
 * duration; if we expect 500 milliseconds of idle time the likelihood of
 * getting an interrupt very early is much higher than if we expect 50
 * microseconds of idle time. A second independent factor that has a big
 * impact on the actual factor is whether there is (disk) IO outstanding
 * or not. (as a special twist, we consider every sleep longer than 50
 * milliseconds as perfect; there are no power gains for sleeping longer
 * than this)
 *
 * For these two reasons we keep an array of 12 independent factors, that gets
 * indexed based on the magnitude of the expected duration as well as the
 * "is IO outstanding" property.
 *
 * Repeatable-interval-detector
 * ----------------------------
 * There are some cases where "next timer" is a completely unusable predictor:
 * Those cases where the interval is fixed, for example due to hardware
 * interrupt mitigation, but also due to fixed transfer rate devices such as
 * mice.
 * For this, we use a different predictor: We track the duration of the last 8
 * intervals and if the standard deviation of these 8 intervals is below a
 * threshold value, we use the average of these intervals as prediction.
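 * For example, a mouse interrupting every 4 milliseconds yields 8 samples
 * of roughly 4000 usecs with a standard deviation near zero, so 4000 usecs
 * is predicted no matter how far away the next timer is.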
 *
 * Limiting Performance Impact
 * ---------------------------
 * C states, especially those with large exit latencies, can have a real
 * noticeable impact on workloads, which is not acceptable for most sysadmins,
 * and in addition, less performance has a power price of its own.
 *
 * As a general rule of thumb, menu assumes that the following heuristic
 * holds:
 *     The busier the system, the less impact of C states is acceptable
 *
 * This rule-of-thumb is implemented using a performance-multiplier:
 * If the exit latency times the performance multiplier is longer than
 * the predicted duration, the C state is not considered a candidate
 * for selection due to a too high performance impact. So the higher
 * this multiplier is, the longer we need to be idle to pick a deep C
 * state, and thus the less likely a busy CPU will hit such a deep
 * C state.
 *
 * Two factors are used in determining this multiplier:
 * 20 points are added for each point of "per cpu load average" we have,
 * and 10 points are added for each process that is waiting for
 * IO on this CPU.
 * (these values are experimentally determined)
 *
 * The load average factor gives a longer term (few seconds) input to the
 * decision, while the iowait value gives a CPU-local, instantaneous input.
 * The iowait factor may look low, but realize that this is also already
 * represented in the system load average.
 *
 */

struct menu_device {
	int		last_state_idx;
	int		needs_update;

	unsigned int	expected_us;
	unsigned int	predicted_us;
	unsigned int	exit_us;
	unsigned int	bucket;
	unsigned int	correction_factor[BUCKETS];
	unsigned int	intervals[INTERVALS];
	int		interval_ptr;
};

#define LOAD_INT(x) ((x) >> FSHIFT)
#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)

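/*
 * LOAD_INT() extracts the integer part of an FSHIFT fixed-point load
 * value and LOAD_FRAC() its first two decimal digits, so a per-cpu load
 * of 1.50 makes get_loadavg() return 1 * 10 + 50 / 10 == 15.
 */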
static int get_loadavg(void)
{
	unsigned long this = this_cpu_load();

	return LOAD_INT(this) * 10 + LOAD_FRAC(this) / 10;
}

static inline int which_bucket(unsigned int duration)
{
	int bucket = 0;

	/*
	 * We keep two groups of stats; one with IO pending,
	 * one without.
	 * This allows us to calculate
	 * E(duration)|iowait
	 */
	if (nr_iowait_cpu(smp_processor_id()))
		bucket = BUCKETS/2;

	if (duration < 10)
		return bucket;
	if (duration < 100)
		return bucket + 1;
	if (duration < 1000)
		return bucket + 2;
	if (duration < 10000)
		return bucket + 3;
	if (duration < 100000)
		return bucket + 4;
	return bucket + 5;
}

/*
 * Return a multiplier for the exit latency that is intended
 * to take performance requirements into account.
 * The more performance critical we estimate the system
 * to be, the higher this multiplier, and thus the higher
 * the barrier to go to an expensive C state.
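 *
 * E.g. a per-cpu load average of 1.50 with one task waiting for IO gives
 * mult = 1 + 2 * 15 + 10 * 1 == 41, so a state with 100 usecs exit
 * latency is only picked once the predicted idle time reaches 4100 usecs.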
 */
static inline int performance_multiplier(void)
{
	int mult = 1;

	/* for higher loadavg, we are more reluctant */
	mult += 2 * get_loadavg();

	/* for each task (per cpu!) waiting for IO we add 10 points */
	mult += 10 * nr_iowait_cpu(smp_processor_id());

	return mult;
}

static DEFINE_PER_CPU(struct menu_device, menu_devices);

static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev);

/* This implements DIV_ROUND_CLOSEST but avoids 64 bit division */
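/*
 * E.g. div_round64(7, 2) == (7 + 1) / 2 == 4, while
 * div_round64(5, 4) == (5 + 2) / 4 == 1, i.e. half rounds up.
 */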
static u64 div_round64(u64 dividend, u32 divisor)
{
	return div_u64(dividend + (divisor / 2), divisor);
}

/*
 * Try detecting repeating patterns by keeping track of the last 8
 * intervals, and checking if the standard deviation of that set
 * of points is below a threshold. If it is... then use the
 * average of these 8 points as the estimated value.
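 *
 * E.g. eight samples around 4000 usecs give avg == 4000 and a stddev of
 * a few usecs; 4000 > 6 * stddev with all 8 samples in use, so 4000 is
 * used as the prediction (if no timer wakes us up sooner).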
 */
static void get_typical_interval(struct menu_device *data)
{
	int i, divisor;
	unsigned int max, thresh;
	uint64_t avg, stddev;

	thresh = UINT_MAX; /* Discard outliers above this value */

again:

	/* First calculate the average of past intervals */
	max = 0;
	avg = 0;
	divisor = 0;
	for (i = 0; i < INTERVALS; i++) {
		unsigned int value = data->intervals[i];
		if (value <= thresh) {
			avg += value;
			divisor++;
			if (value > max)
				max = value;
		}
	}
	do_div(avg, divisor);

	/* Then try to determine standard deviation */
	stddev = 0;
	for (i = 0; i < INTERVALS; i++) {
		unsigned int value = data->intervals[i];
		if (value <= thresh) {
			int64_t diff = value - avg;
			stddev += diff * diff;
		}
	}
	do_div(stddev, divisor);
	/*
	 * The typical interval is obtained when standard deviation is small
	 * or standard deviation is small compared to the average interval.
	 *
	 * int_sqrt() formal parameter type is unsigned long. When the
	 * greatest difference to an outlier exceeds ~65 ms * sqrt(divisor)
	 * the resulting squared standard deviation exceeds the input domain
	 * of int_sqrt on platforms where unsigned long is 32 bits in size.
	 * In such a case, reject the candidate average.
	 *
	 * Use this result only if there is no timer to wake us up sooner.
	 */
	if (likely(stddev <= ULONG_MAX)) {
		stddev = int_sqrt(stddev);
		if (((avg > stddev * 6) && (divisor * 4 >= INTERVALS * 3))
							|| stddev <= 20) {
			if (data->expected_us > avg)
				data->predicted_us = avg;
			return;
		}
	}

	/*
	 * If we have outliers to the upside in our distribution, discard
	 * those by setting the threshold to exclude these outliers, then
	 * calculate the average and standard deviation again. Once we get
	 * down to the bottom 3/4 of our samples, stop excluding samples.
	 *
	 * This can deal with workloads that have long pauses interspersed
	 * with sporadic activity with a bunch of short pauses.
	 */
	if ((divisor * 4) <= INTERVALS * 3)
		return;

	thresh = max - 1;
	goto again;
}

/**
 * menu_select - selects the next idle state to enter
 * @drv: cpuidle driver containing state data
 * @dev: the CPU
 */
static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
	struct menu_device *data = &__get_cpu_var(menu_devices);
	int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
	int i;
	int multiplier;
	struct timespec t;

	if (data->needs_update) {
		menu_update(drv, dev);
		data->needs_update = 0;
	}

	data->last_state_idx = 0;
	data->exit_us = 0;

	/* Special case when user has set very strict latency requirement */
	if (unlikely(latency_req == 0))
		return 0;

	/* determine the expected residency time, round up */
	t = ktime_to_timespec(tick_nohz_get_sleep_length());
	data->expected_us =
		t.tv_sec * USEC_PER_SEC + t.tv_nsec / NSEC_PER_USEC;

	data->bucket = which_bucket(data->expected_us);

	multiplier = performance_multiplier();

	/*
	 * if the correction factor is 0 (e.g. first-time init or CPU
	 * hotplug), we actually want to start out with a unity factor.
	 */
	if (data->correction_factor[data->bucket] == 0)
		data->correction_factor[data->bucket] = RESOLUTION * DECAY;

	/*
	 * Force the result of multiplication to be 64 bits even if both
	 * operands are 32 bits.
	 * Make sure to round up for half microseconds.
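	 * E.g. expected_us == 1000 with a stored factor of 4096 (i.e. 0.5)
	 * yields div_round64(1000 * 4096, 8192) == 500.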
	 */
	data->predicted_us = div_round64((uint64_t)data->expected_us *
					 data->correction_factor[data->bucket],
					 RESOLUTION * DECAY);

	get_typical_interval(data);

	/*
	 * We want to default to C1 (hlt), not to busy polling
	 * unless the timer is happening really really soon.
	 */
	if (data->expected_us > 5 &&
	    !drv->states[CPUIDLE_DRIVER_STATE_START].disabled &&
		dev->states_usage[CPUIDLE_DRIVER_STATE_START].disable == 0)
		data->last_state_idx = CPUIDLE_DRIVER_STATE_START;

	/*
	 * Find the idle state with the lowest power while satisfying
	 * our constraints.
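	 *
	 * E.g. with predicted_us == 500, latency_req == 100 and a
	 * multiplier of 10, a state with 60 usecs exit latency meets the
	 * PM QoS limit but is still rejected because 60 * 10 > 500.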
	 */
	for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++) {
		struct cpuidle_state *s = &drv->states[i];
		struct cpuidle_state_usage *su = &dev->states_usage[i];

		if (s->disabled || su->disable)
			continue;
		if (s->target_residency > data->predicted_us)
			continue;
		if (s->exit_latency > latency_req)
			continue;
		if (s->exit_latency * multiplier > data->predicted_us)
			continue;

		data->last_state_idx = i;
		data->exit_us = s->exit_latency;
	}

	return data->last_state_idx;
}

/**
 * menu_reflect - records that data structures need update
 * @dev: the CPU
 * @index: the index of actual entered state
 *
 * NOTE: it's important to be fast here because this operation will add to
 *       the overall exit latency.
 */
static void menu_reflect(struct cpuidle_device *dev, int index)
{
	struct menu_device *data = &__get_cpu_var(menu_devices);
	data->last_state_idx = index;
	if (index >= 0)
		data->needs_update = 1;
}

/**
 * menu_update - attempts to guess what happened after entry
 * @drv: cpuidle driver containing state data
 * @dev: the CPU
 */
static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
	struct menu_device *data = &__get_cpu_var(menu_devices);
	int last_idx = data->last_state_idx;
	unsigned int last_idle_us = cpuidle_get_last_residency(dev);
	struct cpuidle_state *target = &drv->states[last_idx];
	unsigned int measured_us;
	unsigned int new_factor;

	/*
	 * Ugh, this idle state doesn't support residency measurements, so we
	 * are basically lost in the dark. As a compromise, assume we slept
	 * for the whole expected time.
	 */
	if (unlikely(!(target->flags & CPUIDLE_FLAG_TIME_VALID)))
		last_idle_us = data->expected_us;

	measured_us = last_idle_us;

	/*
	 * We correct for the exit latency; we are assuming here that the
	 * exit latency happens after the event that we're interested in.
	 */
	if (measured_us > data->exit_us)
		measured_us -= data->exit_us;

	/* Update our correction ratio */
	new_factor = data->correction_factor[data->bucket];
	new_factor -= new_factor / DECAY;

	if (data->expected_us > 0 && measured_us < MAX_INTERESTING)
		new_factor += RESOLUTION * measured_us / data->expected_us;
	else
		/*
		 * we were idle so long that we count it as a perfect
		 * prediction
		 */
		new_factor += RESOLUTION;
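
	/*
	 * E.g. with DECAY == 8, a factor starting at unity (8192) and a
	 * measured/expected ratio of 0.5 becomes 8192 - 1024 + 512 == 7680,
	 * converging towards 4096 (i.e. 0.5) over repeated updates.
	 */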

	/*
	 * We don't want 0 as factor; we always want at least
	 * a tiny bit of estimated time. Fortunately, due to rounding,
	 * new_factor will stay nonzero regardless of measured_us values
	 * and the compiler can eliminate this test as long as DECAY > 1.
	 */
	if (DECAY == 1 && unlikely(new_factor == 0))
		new_factor = 1;

	data->correction_factor[data->bucket] = new_factor;

	/* update the repeating-pattern data */
	data->intervals[data->interval_ptr++] = last_idle_us;
	if (data->interval_ptr >= INTERVALS)
		data->interval_ptr = 0;
}

/**
 * menu_enable_device - scans a CPU's states and does setup
 * @drv: cpuidle driver
 * @dev: the CPU
 */
static int menu_enable_device(struct cpuidle_driver *drv,
				struct cpuidle_device *dev)
{
	struct menu_device *data = &per_cpu(menu_devices, dev->cpu);

	memset(data, 0, sizeof(struct menu_device));

	return 0;
}

static struct cpuidle_governor menu_governor = {
	.name =		"menu",
	.rating =	20,
	.enable =	menu_enable_device,
	.select =	menu_select,
	.reflect =	menu_reflect,
	.owner =	THIS_MODULE,
};

/**
 * init_menu - initializes the governor
 */
static int __init init_menu(void)
{
	return cpuidle_register_governor(&menu_governor);
}

postcore_initcall(init_menu);