/*
 * menu.c - the menu idle governor
 *
 * Copyright (C) 2006-2007 Adam Belay <abelay@novell.com>
 * Copyright (C) 2009 Intel Corporation
 * Author:
 *        Arjan van de Ven <arjan@linux.intel.com>
 *
 * This code is licensed under the GPL version 2 as described
 * in the COPYING file that accompanies the Linux Kernel.
 */

#include <linux/kernel.h>
#include <linux/cpuidle.h>
#include <linux/pm_qos.h>
#include <linux/time.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/sched.h>
#include <linux/math64.h>
#include <linux/module.h>

#define BUCKETS 12
#define INTERVALS 8
#define RESOLUTION 1024
#define DECAY 8
#define MAX_INTERESTING 50000
#define STDDEV_THRESH 400

/*
 * Concepts and ideas behind the menu governor
 *
 * For the menu governor, there are 3 decision factors for picking a C
 * state:
 * 1) Energy break even point
 * 2) Performance impact
 * 3) Latency tolerance (from the PM QoS infrastructure)
 * These three factors are treated independently.
 *
 * Energy break even point
 * -----------------------
 * C state entry and exit have an energy cost, and a certain amount of time in
 * the C state is required to actually break even on this cost. CPUIDLE
 * provides us this duration in the "target_residency" field. So all that we
 * need is a good prediction of how long we'll be idle. Like the traditional
 * menu governor, we start with the actual known "next timer event" time.
 *
 * Since there are other sources of wakeups (interrupts for example) than
 * the next timer event, this estimate is rather optimistic. To get a more
 * realistic estimate, a correction factor based on historic behavior is
 * applied to it. For example, if in the past the actual idle duration was
 * always 50% of the time until the next timer event, the correction
 * factor will be 0.5.
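 *
 * To illustrate with made-up numbers: if the next timer event is
 * 1000us away and the stored correction factor is 0.5, the governor
 * predicts roughly 500us of idle time and will only consider C states
 * whose target_residency fits within that prediction.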
 *
 * menu uses a running average for this correction factor; however, it
 * keeps a set of factors rather than a single one. This stems from the
 * realization that the ratio is dependent on the order of magnitude of
 * the expected duration; if we expect 500 milliseconds of idle time the
 * likelihood of getting an interrupt very early is much higher than if
 * we expect 50 microseconds of idle time. A second independent factor
 * that has a big impact on the actual ratio is whether there is (disk)
 * IO outstanding or not. (As a special twist, we consider every sleep
 * longer than 50 milliseconds as perfect; there are no power gains for
 * sleeping longer than this.)
 *
 * For these two reasons we keep an array of 12 independent factors, that gets
 * indexed based on the magnitude of the expected duration as well as the
 * "is IO outstanding" property, as laid out below.
 *
 * Repeatable-interval-detector
 * ----------------------------
 * There are some cases where "next timer" is a completely unusable
 * predictor: those cases where the interval is fixed, for example due to
 * hardware interrupt mitigation, but also due to fixed transfer rate
 * devices such as mice.
 * For this, we use a different predictor: we track the duration of the
 * last 8 intervals and if the standard deviation of these 8 intervals is
 * below a threshold value, we use the average of these intervals as the
 * prediction.
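 *
 * For example, a device interrupting at a steady 1000us period yields
 * eight samples clustered around 1000us with a tiny standard deviation,
 * so 1000us is used as the prediction even if the next timer event is
 * much further away.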
 *
 * Limiting Performance Impact
 * ---------------------------
 * C states, especially those with large exit latencies, can have a real
 * noticeable impact on workloads, which is not acceptable for most
 * sysadmins, and in addition, less performance has a power price of its
 * own.
 *
 * As a general rule of thumb, menu assumes that the following heuristic
 * holds:
 *     The busier the system, the less impact of C states is acceptable
 *
 * This rule-of-thumb is implemented using a performance multiplier:
 * if the exit latency times the performance multiplier is longer than
 * the predicted duration, the C state is not considered a candidate
 * for selection due to a too high performance impact. So the higher
 * this multiplier is, the longer we need to be idle to pick a deep C
 * state, and thus the less likely a busy CPU will hit such a deep
 * C state.
 *
 * Two factors are used in determining this multiplier, matching
 * performance_multiplier() below:
 * 20 points are added for each unit of "per cpu load average" we have,
 * and 10 points are added for each process that is waiting for IO on
 * this CPU.
 * (these values are experimentally determined)
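 *
 * As a made-up example: with a per-cpu load average of 1.0 (20 points)
 * and two tasks waiting for IO (2 * 10 points), the multiplier is
 * 1 + 20 + 20 = 41, so a C state with a 100us exit latency is only
 * chosen when the predicted idle time exceeds 4100us.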
 *
 * The load average factor gives a longer term (few seconds) input to the
 * decision, while the iowait value gives a CPU-local, instantaneous input.
 * The iowait factor may look low, but realize that this is also already
 * represented in the system load average.
 *
 */

struct menu_device {
	int		last_state_idx;	/* state index selected last time */
	int		needs_update;	/* stats must be refreshed on next select */

	unsigned int	expected_us;	/* time until the next timer event */
	u64		predicted_us;	/* corrected idle-time prediction */
	unsigned int	exit_us;	/* exit latency of the selected state */
	unsigned int	bucket;		/* current correction-factor bucket */
	u64		correction_factor[BUCKETS];
	u32		intervals[INTERVALS];	/* last INTERVALS idle durations */
	int		interval_ptr;	/* ring-buffer cursor into intervals[] */
};

#define LOAD_INT(x) ((x) >> FSHIFT)
#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)
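
/*
 * Example (assuming FSHIFT = 11 and FIXED_1 = 1 << 11 from the
 * scheduler headers): a fixed-point load of 2560 represents 1.25,
 * so LOAD_INT(2560) = 1 and LOAD_FRAC(2560) = 25, and get_loadavg()
 * below reports 1 * 10 + 25 / 10 = 12.
 */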

static int get_loadavg(void)
{
	unsigned long this = this_cpu_load();

	return LOAD_INT(this) * 10 + LOAD_FRAC(this) / 10;
}

static inline int which_bucket(unsigned int duration)
{
	int bucket = 0;

	/*
	 * We keep two groups of stats; one with IO pending, one
	 * without. This allows us to calculate E(duration)|iowait.
	 */
	if (nr_iowait_cpu(smp_processor_id()))
		bucket = BUCKETS/2;

	if (duration < 10)
		return bucket;
	if (duration < 100)
		return bucket + 1;
	if (duration < 1000)
		return bucket + 2;
	if (duration < 10000)
		return bucket + 3;
	if (duration < 100000)
		return bucket + 4;
	return bucket + 5;
}
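
/*
 * E.g. which_bucket(5000) returns 3 when no IO is pending (5000us
 * falls in the 1ms-10ms decade) and 9 when this CPU has iowait tasks
 * (the same decade in the second bank of buckets).
 */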

/*
 * Return a multiplier for the exit latency that is intended
 * to take performance requirements into account.
 * The more performance critical we estimate the system
 * to be, the higher this multiplier, and thus the higher
 * the barrier to go to an expensive C state.
 */
static inline int performance_multiplier(void)
{
	int mult = 1;

	/* for higher loadavg, we are more reluctant */
	mult += 2 * get_loadavg();

	/* for IO wait tasks (per cpu!) we add 10 each */
	mult += 10 * nr_iowait_cpu(smp_processor_id());

	return mult;
}

static DEFINE_PER_CPU(struct menu_device, menu_devices);

static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev);

/* This implements DIV_ROUND_CLOSEST but avoids a 64-by-64-bit division */
static u64 div_round64(u64 dividend, u32 divisor)
{
	return div_u64(dividend + (divisor / 2), divisor);
}
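
/*
 * E.g. div_round64(10, 4) yields (10 + 2) / 4 = 3, whereas plain
 * div_u64(10, 4) would truncate to 2.
 */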

/*
 * Try detecting repeating patterns by keeping track of the last 8
 * intervals, and checking if the standard deviation of that set
 * of points is below a threshold. If it is... then use the
 * average of these 8 points as the estimated value.
 */
static void get_typical_interval(struct menu_device *data)
{
	int i = 0, divisor = 0;
	uint64_t max = 0, avg = 0, stddev = 0;
	int64_t thresh = LLONG_MAX; /* Discard outliers above this value. */

again:

	/* first calculate average and standard deviation of the past */
	max = avg = divisor = stddev = 0;
	for (i = 0; i < INTERVALS; i++) {
		int64_t value = data->intervals[i];
		if (value <= thresh) {
			avg += value;
			divisor++;
			if (value > max)
				max = value;
		}
	}
	do_div(avg, divisor);

	for (i = 0; i < INTERVALS; i++) {
		int64_t value = data->intervals[i];
		if (value <= thresh) {
			int64_t diff = value - avg;
			stddev += diff * diff;
		}
	}
	do_div(stddev, divisor);
	stddev = int_sqrt(stddev);
	/*
	 * If we have outliers to the upside in our distribution, discard
	 * those by setting the threshold to exclude these outliers, then
	 * calculate the average and standard deviation again. Once we get
	 * down to the bottom 3/4 of our samples, stop excluding samples.
	 *
	 * This can deal with workloads that have long pauses interspersed
	 * with sporadic activity with a bunch of short pauses.
	 *
	 * The typical interval is accepted when the standard deviation is
	 * small in absolute terms (<= 20 us) or small compared to the
	 * average interval (more than 6 times smaller).
	 */
	if (((avg > stddev * 6) && (divisor * 4 >= INTERVALS * 3))
							|| stddev <= 20) {
		data->predicted_us = avg;
		return;

	} else if ((divisor * 4) > INTERVALS * 3) {
		/* Exclude the max interval */
		thresh = max - 1;
		goto again;
	}
}
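
/*
 * Worked example with made-up data: intervals = { 100, 103, 99, 101,
 * 97, 102, 98, 5000 }. The first pass sees a huge standard deviation,
 * so the 5000us outlier is dropped by setting thresh to 4999. The
 * second pass over the remaining 7 samples (still more than 3/4 of
 * INTERVALS) gives avg = 100 and stddev = 2; avg > 6 * stddev holds,
 * so 100us becomes the prediction.
 */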

/**
 * menu_select - selects the next idle state to enter
 * @drv: cpuidle driver containing state data
 * @dev: the CPU
 */
static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
	struct menu_device *data = &__get_cpu_var(menu_devices);
	int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
	int i;
	int multiplier;
	struct timespec t;

	if (data->needs_update) {
		menu_update(drv, dev);
		data->needs_update = 0;
	}

	data->last_state_idx = 0;
	data->exit_us = 0;

	/* Special case when user has set very strict latency requirement */
	if (unlikely(latency_req == 0))
		return 0;

	/* determine the expected residency time, round up */
	t = ktime_to_timespec(tick_nohz_get_sleep_length());
	data->expected_us =
		t.tv_sec * USEC_PER_SEC + t.tv_nsec / NSEC_PER_USEC;

	data->bucket = which_bucket(data->expected_us);

	multiplier = performance_multiplier();

	/*
	 * if the correction factor is 0 (eg first time init or cpu hotplug
	 * etc), we actually want to start out with a unity factor.
	 */
	if (data->correction_factor[data->bucket] == 0)
		data->correction_factor[data->bucket] = RESOLUTION * DECAY;

	/* Make sure to round up for half microseconds */
	data->predicted_us = div_round64(data->expected_us * data->correction_factor[data->bucket],
					 RESOLUTION * DECAY);
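
	/*
	 * E.g. (made-up numbers): expected_us = 1000 with a stored
	 * factor of 4096 (i.e. 0.5 in RESOLUTION * DECAY fixed point)
	 * gives predicted_us = div_round64(4096000, 8192) = 500.
	 */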

	get_typical_interval(data);

	/*
	 * We want to default to C1 (hlt), not to busy polling,
	 * unless the timer is happening really, really soon.
	 */
	if (data->expected_us > 5 &&
	    !drv->states[CPUIDLE_DRIVER_STATE_START].disabled &&
		dev->states_usage[CPUIDLE_DRIVER_STATE_START].disable == 0)
		data->last_state_idx = CPUIDLE_DRIVER_STATE_START;

	/*
	 * Find the idle state with the lowest power while satisfying
	 * our constraints.
	 */
	for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++) {
		struct cpuidle_state *s = &drv->states[i];
		struct cpuidle_state_usage *su = &dev->states_usage[i];

		if (s->disabled || su->disable)
			continue;
		if (s->target_residency > data->predicted_us)
			continue;
		if (s->exit_latency > latency_req)
			continue;
		if (s->exit_latency * multiplier > data->predicted_us)
			continue;

		data->last_state_idx = i;
		data->exit_us = s->exit_latency;
	}

	return data->last_state_idx;
}

/**
 * menu_reflect - records that data structures need update
 * @dev: the CPU
 * @index: the index of actual entered state
 *
 * NOTE: it's important to be fast here because this operation will add to
 *       the overall exit latency.
 */
static void menu_reflect(struct cpuidle_device *dev, int index)
{
	struct menu_device *data = &__get_cpu_var(menu_devices);

	data->last_state_idx = index;
	if (index >= 0)
		data->needs_update = 1;
}

/**
 * menu_update - attempts to guess what happened after entry
 * @drv: cpuidle driver containing state data
 * @dev: the CPU
 */
static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
	struct menu_device *data = &__get_cpu_var(menu_devices);
	int last_idx = data->last_state_idx;
	unsigned int last_idle_us = cpuidle_get_last_residency(dev);
	struct cpuidle_state *target = &drv->states[last_idx];
	unsigned int measured_us;
	u64 new_factor;

	/*
	 * Ugh, this idle state doesn't support residency measurements, so we
	 * are basically lost in the dark.  As a compromise, assume we slept
	 * for the whole expected time.
	 */
	if (unlikely(!(target->flags & CPUIDLE_FLAG_TIME_VALID)))
		last_idle_us = data->expected_us;

	measured_us = last_idle_us;

	/*
	 * We correct for the exit latency; we are assuming here that the
	 * exit latency happens after the event that we're interested in.
	 */
	if (measured_us > data->exit_us)
		measured_us -= data->exit_us;

	/* update our correction ratio */
	new_factor = data->correction_factor[data->bucket]
			* (DECAY - 1) / DECAY;

	if (data->expected_us > 0 && measured_us < MAX_INTERESTING)
		new_factor += RESOLUTION * measured_us / data->expected_us;
	else
		/*
		 * we were idle so long that we count it as a perfect
		 * prediction
		 */
		new_factor += RESOLUTION;
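
	/*
	 * E.g. starting from a unity factor (RESOLUTION * DECAY = 8192),
	 * an idle period measured at half of the expected time yields
	 * new_factor = 8192 * 7 / 8 + 1024 / 2 = 7680; repeated 50%
	 * sleeps make the factor converge towards 4096, i.e. 0.5.
	 */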

	/*
	 * We don't want 0 as factor; we always want at least
	 * a tiny bit of estimated time.
	 */
	if (new_factor == 0)
		new_factor = 1;

	data->correction_factor[data->bucket] = new_factor;

	/* update the repeating-pattern data */
	data->intervals[data->interval_ptr++] = last_idle_us;
	if (data->interval_ptr >= INTERVALS)
		data->interval_ptr = 0;
}

/**
 * menu_enable_device - scans a CPU's states and does setup
 * @drv: cpuidle driver
 * @dev: the CPU
 */
static int menu_enable_device(struct cpuidle_driver *drv,
				struct cpuidle_device *dev)
{
	struct menu_device *data = &per_cpu(menu_devices, dev->cpu);

	memset(data, 0, sizeof(struct menu_device));

	return 0;
}

static struct cpuidle_governor menu_governor = {
	.name =		"menu",
	.rating =	20,
	.enable =	menu_enable_device,
	.select =	menu_select,
	.reflect =	menu_reflect,
	.owner =	THIS_MODULE,
};

/**
 * init_menu - initializes the governor
 */
static int __init init_menu(void)
{
	return cpuidle_register_governor(&menu_governor);
}

/**
 * exit_menu - exits the governor
 */
static void __exit exit_menu(void)
{
	cpuidle_unregister_governor(&menu_governor);
}

MODULE_LICENSE("GPL");
module_init(init_menu);
module_exit(exit_menu);