xref: /linux/drivers/cpufreq/cpufreq_governor.c (revision e5c86679d5e864947a52fb31e45a425dea3e7fa9)
/*
 * drivers/cpufreq/cpufreq_governor.c
 *
 * CPUFREQ governors common code
 *
 * Copyright	(C) 2001 Russell King
 *		(C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *		(C) 2003 Jun Nakajima <jun.nakajima@intel.com>
 *		(C) 2009 Alexander Clouter <alex@digriz.org.uk>
 *		(C) 2012 Viresh Kumar <viresh.kumar@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/slab.h>

#include "cpufreq_governor.h"

static DEFINE_PER_CPU(struct cpu_dbs_info, cpu_dbs);

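/* Serializes manipulation of gov->gdbs_data across governor init/exit. */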
static DEFINE_MUTEX(gov_dbs_data_mutex);

/* Common sysfs tunables */
/**
 * store_sampling_rate - Update the sampling rate, effective immediately if needed.
 * @attr_set: Attribute set the tunable belongs to.
 * @buf: Buffer holding the new value written via sysfs.
 * @count: Number of characters in @buf.
 *
 * If the new rate is smaller than the old one, simply updating
 * dbs_data->sampling_rate is not enough.  For example, if the original
 * sampling rate was 1 second and the user requests 10 ms to get an immediate
 * reaction from the ondemand governor, the next sample may still be taken up
 * to 1 second later with the old delay.  Thus, when the sampling rate is
 * being reduced, the new value needs to be made effective immediately, which
 * is what the gov_update_sample_delay() calls below arrange for.
 *
 * This must be called with dbs_data->mutex held, otherwise traversing
 * policy_dbs_list isn't safe.
 */
ssize_t store_sampling_rate(struct gov_attr_set *attr_set, const char *buf,
			    size_t count)
{
	struct dbs_data *dbs_data = to_dbs_data(attr_set);
	struct policy_dbs_info *policy_dbs;
	unsigned int rate;
	int ret;

	ret = sscanf(buf, "%u", &rate);
	if (ret != 1)
		return -EINVAL;

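	/* Never let the effective rate drop below the governor's minimum. */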
	dbs_data->sampling_rate = max(rate, dbs_data->min_sampling_rate);

	/*
	 * We are operating under dbs_data->mutex and so the list and its
	 * entries can't be freed concurrently.
	 */
	list_for_each_entry(policy_dbs, &attr_set->policy_list, list) {
		mutex_lock(&policy_dbs->update_mutex);
		/*
		 * On 32-bit architectures this may race with the
		 * sample_delay_ns read in dbs_update_util_handler(), but that
		 * really doesn't matter.  If the read returns a value that's
		 * too big, the sample will be skipped, but the next invocation
		 * of dbs_update_util_handler() (when the update has been
		 * completed) will take a sample.
		 *
		 * If this runs in parallel with dbs_work_handler(), we may end
		 * up overwriting the sample_delay_ns value that it has just
		 * written, but it will be corrected next time a sample is
		 * taken, so it shouldn't be significant.
		 */
		gov_update_sample_delay(policy_dbs, 0);
		mutex_unlock(&policy_dbs->update_mutex);
	}

	return count;
}
EXPORT_SYMBOL_GPL(store_sampling_rate);

/**
 * gov_update_cpu_data - Update CPU load data.
 * @dbs_data: Top-level governor data pointer.
 *
 * Update CPU load data for all CPUs in the domain governed by @dbs_data
 * (that may be a single policy or a bunch of them if governor tunables are
 * system-wide).
 *
 * Call under the @dbs_data mutex.
 */
void gov_update_cpu_data(struct dbs_data *dbs_data)
{
	struct policy_dbs_info *policy_dbs;

	list_for_each_entry(policy_dbs, &dbs_data->attr_set.policy_list, list) {
		unsigned int j;

		for_each_cpu(j, policy_dbs->policy->cpus) {
			struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);

			j_cdbs->prev_cpu_idle = get_cpu_idle_time(j, &j_cdbs->prev_update_time,
								  dbs_data->io_is_busy);
			if (dbs_data->ignore_nice_load)
				j_cdbs->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
		}
	}
}
EXPORT_SYMBOL_GPL(gov_update_cpu_data);

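/**
 * dbs_update - Compute the load for frequency selection.
 * @policy: cpufreq policy for which the load is to be computed.
 *
 * Computes the per-CPU load since the previous sample for every CPU sharing
 * @policy, refreshes the per-CPU bookkeeping (prev_update_time, prev_cpu_idle
 * and, if nice time is ignored, prev_cpu_nice), stores the number of skipped
 * sampling periods in policy_dbs->idle_periods, and returns the maximum load
 * found (in percent).
 */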
unsigned int dbs_update(struct cpufreq_policy *policy)
{
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	unsigned int ignore_nice = dbs_data->ignore_nice_load;
	unsigned int max_load = 0, idle_periods = UINT_MAX;
	unsigned int sampling_rate, io_busy, j;

	/*
	 * Sometimes governors may use an additional multiplier to increase
	 * sample delays temporarily.  Apply that multiplier to sampling_rate
	 * so as to keep the wake-up-from-idle detection logic a bit
	 * conservative.
	 */
	sampling_rate = dbs_data->sampling_rate * policy_dbs->rate_mult;
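	/*
	 * (For instance, the ondemand governor raises rate_mult via its
	 * sampling_down_factor tunable while the policy runs at the maximum
	 * frequency, so samples are spaced further apart in that state.)
	 */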
	/*
	 * For the purposes of ondemand, waiting for disk IO is an indication
	 * that the task is performance-critical, not that the system is
	 * actually idle, so the iowait time is not added to the CPU idle time
	 * in that case.
	 */
	io_busy = dbs_data->io_is_busy;

	/* Get Absolute Load */
	for_each_cpu(j, policy->cpus) {
		struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);
		u64 update_time, cur_idle_time;
		unsigned int idle_time, time_elapsed;
		unsigned int load;

		cur_idle_time = get_cpu_idle_time(j, &update_time, io_busy);

		time_elapsed = update_time - j_cdbs->prev_update_time;
		j_cdbs->prev_update_time = update_time;

		idle_time = cur_idle_time - j_cdbs->prev_cpu_idle;
		j_cdbs->prev_cpu_idle = cur_idle_time;

		if (ignore_nice) {
			u64 cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];

			idle_time += div_u64(cur_nice - j_cdbs->prev_cpu_nice, NSEC_PER_USEC);
			j_cdbs->prev_cpu_nice = cur_nice;
		}

		if (unlikely(!time_elapsed)) {
			/*
			 * That can only happen when this function is called
			 * twice in a row with a very short interval between
			 * the calls, so the previous load value can be used
			 * instead.
			 */
			load = j_cdbs->prev_load;
		} else if (unlikely(time_elapsed > 2 * sampling_rate &&
				    j_cdbs->prev_load)) {
			/*
			 * If the CPU had gone completely idle and a task has
			 * just woken up on this CPU now, it would be unfair to
			 * calculate 'load' the usual way for this elapsed
			 * time-window, because it would show near-zero load,
			 * irrespective of how CPU-intensive that task actually
			 * was. This is undesirable for latency-sensitive bursty
			 * workloads.
			 *
			 * To avoid this, reuse the 'load' from the previous
			 * time-window and give this task a chance to start with
			 * a reasonably high CPU frequency. However, that
			 * shouldn't be over-done, lest we get stuck at a high
			 * load (high frequency) for too long, even when the
			 * current system load has actually dropped down, so
			 * clear prev_load to guarantee that the load will be
			 * computed again next time.
			 *
			 * Detecting this situation is easy: the governor's
			 * utilization update handler would not have run during
			 * CPU-idle periods.  Hence, an unusually large
			 * 'time_elapsed' (as compared to the sampling rate)
			 * indicates this scenario.
			 */
			load = j_cdbs->prev_load;
			j_cdbs->prev_load = 0;
		} else {
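			/*
			 * Common case: load is the percentage of wall-clock
			 * time spent non-idle.  Illustrative numbers: with
			 * time_elapsed = 10000 us and idle_time = 2500 us,
			 * load = 100 * (10000 - 2500) / 10000 = 75.
			 */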
			if (time_elapsed >= idle_time) {
				load = 100 * (time_elapsed - idle_time) / time_elapsed;
			} else {
				/*
				 * That can happen if idle_time is returned by
				 * get_cpu_idle_time_jiffy().  In that case
				 * idle_time is roughly equal to the difference
				 * between time_elapsed and "busy time" obtained
				 * from CPU statistics.  Then, the "busy time"
				 * can end up being greater than time_elapsed
				 * (for example, if jiffies_64 and the CPU
				 * statistics are updated by different CPUs),
				 * so idle_time may in fact be negative.  That
				 * means, though, that the CPU was busy all
				 * the time (on the rough average) during the
				 * last sampling interval and 100 can be
				 * returned as the load.
				 */
				load = (int)idle_time < 0 ? 100 : 0;
			}
			j_cdbs->prev_load = load;
		}

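		/*
		 * Count how many whole sampling periods went by without an
		 * update while this CPU was idle; governors (conservative in
		 * particular) can use the smallest such count in the policy
		 * to step the frequency down faster after long idle spells.
		 */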
		if (time_elapsed > 2 * sampling_rate) {
			unsigned int periods = time_elapsed / sampling_rate;

			if (periods < idle_periods)
				idle_periods = periods;
		}

		if (load > max_load)
			max_load = load;
	}

	policy_dbs->idle_periods = idle_periods;

	return max_load;
}
EXPORT_SYMBOL_GPL(dbs_update);

static void dbs_work_handler(struct work_struct *work)
{
	struct policy_dbs_info *policy_dbs;
	struct cpufreq_policy *policy;
	struct dbs_governor *gov;

	policy_dbs = container_of(work, struct policy_dbs_info, work);
	policy = policy_dbs->policy;
	gov = dbs_governor_of(policy);

	/*
	 * Make sure cpufreq_governor_limits() isn't evaluating load or the
	 * ondemand governor isn't updating the sampling rate in parallel.
	 */
	mutex_lock(&policy_dbs->update_mutex);
	gov_update_sample_delay(policy_dbs, gov->gov_dbs_update(policy));
	mutex_unlock(&policy_dbs->update_mutex);

	/* Allow the utilization update handler to queue up more work. */
	atomic_set(&policy_dbs->work_count, 0);
	/*
	 * If the update below is reordered with respect to the sample delay
	 * modification, the utilization update handler may end up using a
	 * stale sample delay value.  This barrier pairs with the smp_rmb()
	 * in dbs_update_util_handler().
	 */
	smp_wmb();
	policy_dbs->work_in_progress = false;
}

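/*
 * The utilization update hook runs from scheduler context, where sleeping or
 * queuing a work item directly is not allowed, so dbs_update_util_handler()
 * raises this irq_work, which in turn schedules the real work on the local
 * CPU.
 */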
static void dbs_irq_work(struct irq_work *irq_work)
{
	struct policy_dbs_info *policy_dbs;

	policy_dbs = container_of(irq_work, struct policy_dbs_info, irq_work);
	schedule_work_on(smp_processor_id(), &policy_dbs->work);
}

static void dbs_update_util_handler(struct update_util_data *data, u64 time,
				    unsigned int flags)
{
	struct cpu_dbs_info *cdbs = container_of(data, struct cpu_dbs_info, update_util);
	struct policy_dbs_info *policy_dbs = cdbs->policy_dbs;
	u64 delta_ns, lst;

	/*
	 * The work may not be allowed to be queued up right now.
	 * Possible reasons:
	 * - Work has already been queued up or is in progress.
	 * - It is too early (too little time from the previous sample).
	 */
	if (policy_dbs->work_in_progress)
		return;

	/*
	 * If the reads below are reordered before the check above, the value
	 * of sample_delay_ns used in the computation may be stale.  This
	 * barrier pairs with the smp_wmb() in dbs_work_handler().
	 */
	smp_rmb();
	lst = READ_ONCE(policy_dbs->last_sample_time);
	delta_ns = time - lst;
	if ((s64)delta_ns < policy_dbs->sample_delay_ns)
		return;

	/*
	 * If the policy is not shared, the irq_work may be queued up right away
	 * at this point.  Otherwise, we need to ensure that only one of the
	 * CPUs sharing the policy will do that.
	 */
	if (policy_dbs->is_shared) {
		if (!atomic_add_unless(&policy_dbs->work_count, 1, 1))
			return;

		/*
		 * If another CPU updated last_sample_time in the meantime, we
		 * shouldn't be here, so clear the work counter and bail out.
		 */
		if (unlikely(lst != READ_ONCE(policy_dbs->last_sample_time))) {
			atomic_set(&policy_dbs->work_count, 0);
			return;
		}
	}

	policy_dbs->last_sample_time = time;
	policy_dbs->work_in_progress = true;
	irq_work_queue(&policy_dbs->irq_work);
}

static void gov_set_update_util(struct policy_dbs_info *policy_dbs,
				unsigned int delay_us)
{
	struct cpufreq_policy *policy = policy_dbs->policy;
	int cpu;

	gov_update_sample_delay(policy_dbs, delay_us);
	policy_dbs->last_sample_time = 0;

	for_each_cpu(cpu, policy->cpus) {
		struct cpu_dbs_info *cdbs = &per_cpu(cpu_dbs, cpu);

		cpufreq_add_update_util_hook(cpu, &cdbs->update_util,
					     dbs_update_util_handler);
	}
}

static inline void gov_clear_update_util(struct cpufreq_policy *policy)
{
	int i;

	for_each_cpu(i, policy->cpus)
		cpufreq_remove_update_util_hook(i);

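	/* Wait for any in-flight dbs_update_util_handler() calls to finish. */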
	synchronize_sched();
}

static struct policy_dbs_info *alloc_policy_dbs_info(struct cpufreq_policy *policy,
						     struct dbs_governor *gov)
{
	struct policy_dbs_info *policy_dbs;
	int j;

	/* Allocate memory for per-policy governor data. */
	policy_dbs = gov->alloc();
	if (!policy_dbs)
		return NULL;

	policy_dbs->policy = policy;
	mutex_init(&policy_dbs->update_mutex);
	atomic_set(&policy_dbs->work_count, 0);
	init_irq_work(&policy_dbs->irq_work, dbs_irq_work);
	INIT_WORK(&policy_dbs->work, dbs_work_handler);

	/* Set policy_dbs for all CPUs, online+offline */
	for_each_cpu(j, policy->related_cpus) {
		struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);

		j_cdbs->policy_dbs = policy_dbs;
	}
	return policy_dbs;
}

static void free_policy_dbs_info(struct policy_dbs_info *policy_dbs,
				 struct dbs_governor *gov)
{
	int j;

	mutex_destroy(&policy_dbs->update_mutex);

	for_each_cpu(j, policy_dbs->policy->related_cpus) {
		struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);

		j_cdbs->policy_dbs = NULL;
		j_cdbs->update_util.func = NULL;
	}
	gov->free(policy_dbs);
}

int cpufreq_dbs_governor_init(struct cpufreq_policy *policy)
{
	struct dbs_governor *gov = dbs_governor_of(policy);
	struct dbs_data *dbs_data;
	struct policy_dbs_info *policy_dbs;
	unsigned int latency;
	int ret = 0;

	/* State should be equivalent to EXIT */
	if (policy->governor_data)
		return -EBUSY;

	policy_dbs = alloc_policy_dbs_info(policy, gov);
	if (!policy_dbs)
		return -ENOMEM;

	/* Protect gov->gdbs_data against concurrent updates. */
	mutex_lock(&gov_dbs_data_mutex);

	dbs_data = gov->gdbs_data;
	if (dbs_data) {
		if (WARN_ON(have_governor_per_policy())) {
			ret = -EINVAL;
			goto free_policy_dbs_info;
		}
		policy_dbs->dbs_data = dbs_data;
		policy->governor_data = policy_dbs;

		gov_attr_set_get(&dbs_data->attr_set, &policy_dbs->list);
		goto out;
	}

	dbs_data = kzalloc(sizeof(*dbs_data), GFP_KERNEL);
	if (!dbs_data) {
		ret = -ENOMEM;
		goto free_policy_dbs_info;
	}

	gov_attr_set_init(&dbs_data->attr_set, &policy_dbs->list);

	ret = gov->init(dbs_data);
	if (ret)
		goto free_policy_dbs_info;

	/* Policy latency is in ns.  Convert it to us first. */
	latency = policy->cpuinfo.transition_latency / 1000;
	if (latency == 0)
		latency = 1;

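	/*
	 * Illustrative example (assuming the historical LATENCY_MULTIPLIER
	 * value of 1000): a 10 us transition latency yields a default
	 * sampling rate of 10000 us, i.e. 10 ms.
	 */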
	/* Bring kernel and HW constraints together */
	dbs_data->min_sampling_rate = max(dbs_data->min_sampling_rate,
					  MIN_LATENCY_MULTIPLIER * latency);
	dbs_data->sampling_rate = max(dbs_data->min_sampling_rate,
				      LATENCY_MULTIPLIER * latency);

	if (!have_governor_per_policy())
		gov->gdbs_data = dbs_data;

	policy_dbs->dbs_data = dbs_data;
	policy->governor_data = policy_dbs;

	gov->kobj_type.sysfs_ops = &governor_sysfs_ops;
	ret = kobject_init_and_add(&dbs_data->attr_set.kobj, &gov->kobj_type,
				   get_governor_parent_kobj(policy),
				   "%s", gov->gov.name);
	if (!ret)
		goto out;

	/* Failure, so roll back. */
	pr_err("initialization failed (dbs_data kobject init error %d)\n", ret);

	policy->governor_data = NULL;

	if (!have_governor_per_policy())
		gov->gdbs_data = NULL;
	gov->exit(dbs_data);
	kfree(dbs_data);

free_policy_dbs_info:
	free_policy_dbs_info(policy_dbs, gov);

out:
	mutex_unlock(&gov_dbs_data_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_init);

void cpufreq_dbs_governor_exit(struct cpufreq_policy *policy)
{
	struct dbs_governor *gov = dbs_governor_of(policy);
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	unsigned int count;

	/* Protect gov->gdbs_data against concurrent updates. */
	mutex_lock(&gov_dbs_data_mutex);

	count = gov_attr_set_put(&dbs_data->attr_set, &policy_dbs->list);

	policy->governor_data = NULL;

	if (!count) {
		if (!have_governor_per_policy())
			gov->gdbs_data = NULL;

		gov->exit(dbs_data);
		kfree(dbs_data);
	}

	free_policy_dbs_info(policy_dbs, gov);

	mutex_unlock(&gov_dbs_data_mutex);
}
EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_exit);

int cpufreq_dbs_governor_start(struct cpufreq_policy *policy)
{
	struct dbs_governor *gov = dbs_governor_of(policy);
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	unsigned int sampling_rate, ignore_nice, j;
	unsigned int io_busy;

	if (!policy->cur)
		return -EINVAL;

	policy_dbs->is_shared = policy_is_shared(policy);
	policy_dbs->rate_mult = 1;

	sampling_rate = dbs_data->sampling_rate;
	ignore_nice = dbs_data->ignore_nice_load;
	io_busy = dbs_data->io_is_busy;

	for_each_cpu(j, policy->cpus) {
		struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);

		j_cdbs->prev_cpu_idle = get_cpu_idle_time(j, &j_cdbs->prev_update_time, io_busy);
		/* Make the first invocation of dbs_update() compute the load. */
		j_cdbs->prev_load = 0;

		if (ignore_nice)
			j_cdbs->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
	}

	gov->start(policy);

	gov_set_update_util(policy_dbs, sampling_rate);
	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_start);

void cpufreq_dbs_governor_stop(struct cpufreq_policy *policy)
{
	struct policy_dbs_info *policy_dbs = policy->governor_data;

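	/*
	 * Tear-down order matters here: remove the update-util hooks first so
	 * that no new irq_work or work items can be queued, then flush the
	 * ones already in flight before resetting the bookkeeping flags.
	 */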
	gov_clear_update_util(policy_dbs->policy);
	irq_work_sync(&policy_dbs->irq_work);
	cancel_work_sync(&policy_dbs->work);
	atomic_set(&policy_dbs->work_count, 0);
	policy_dbs->work_in_progress = false;
}
EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_stop);

void cpufreq_dbs_governor_limits(struct cpufreq_policy *policy)
{
	struct policy_dbs_info *policy_dbs = policy->governor_data;

	mutex_lock(&policy_dbs->update_mutex);
	cpufreq_policy_apply_limits(policy);
	gov_update_sample_delay(policy_dbs, 0);
	mutex_unlock(&policy_dbs->update_mutex);
}
EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_limits);