/* cpufreq_schedutil.c — interleaved comparison of revisions cc9263874b42 (old) and 58919e83c85c (new) */
1/*
2 * CPUFreq governor based on scheduler-provided CPU utilization data.
3 *
4 * Copyright (C) 2016, Intel Corporation
5 * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13
14#include <linux/cpufreq.h>
1/*
2 * CPUFreq governor based on scheduler-provided CPU utilization data.
3 *
4 * Copyright (C) 2016, Intel Corporation
5 * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13
14#include <linux/cpufreq.h>
15#include <linux/module.h>
16#include <linux/slab.h>
17#include <trace/events/power.h>
18
19#include "sched.h"
20
21struct sugov_tunables {
22 struct gov_attr_set attr_set;
23 unsigned int rate_limit_us;

--- 24 unchanged lines hidden (view full) ---

48 struct sugov_policy *sg_policy;
49
50 unsigned int cached_raw_freq;
51
52 /* The fields below are only needed when sharing a policy. */
53 unsigned long util;
54 unsigned long max;
55 u64 last_update;
15#include <linux/slab.h>
16#include <trace/events/power.h>
17
18#include "sched.h"
19
20struct sugov_tunables {
21 struct gov_attr_set attr_set;
22 unsigned int rate_limit_us;

--- 24 unchanged lines hidden (view full) ---

47 struct sugov_policy *sg_policy;
48
49 unsigned int cached_raw_freq;
50
51 /* The fields below are only needed when sharing a policy. */
52 unsigned long util;
53 unsigned long max;
54 u64 last_update;
55 unsigned int flags;
56};
57
58static DEFINE_PER_CPU(struct sugov_cpu, sugov_cpu);
59
60/************************ Governor internals ***********************/
61
62static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
63{

--- 75 unchanged lines hidden (view full) ---

139 freq = (freq + (freq >> 2)) * util / max;
140
141 if (freq == sg_cpu->cached_raw_freq && sg_policy->next_freq != UINT_MAX)
142 return sg_policy->next_freq;
143 sg_cpu->cached_raw_freq = freq;
144 return cpufreq_driver_resolve_freq(policy, freq);
145}
146
56};
57
58static DEFINE_PER_CPU(struct sugov_cpu, sugov_cpu);
59
60/************************ Governor internals ***********************/
61
62static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
63{

--- 75 unchanged lines hidden (view full) ---

139 freq = (freq + (freq >> 2)) * util / max;
140
141 if (freq == sg_cpu->cached_raw_freq && sg_policy->next_freq != UINT_MAX)
142 return sg_policy->next_freq;
143 sg_cpu->cached_raw_freq = freq;
144 return cpufreq_driver_resolve_freq(policy, freq);
145}
146
147static void sugov_get_util(unsigned long *util, unsigned long *max)
148{
149 struct rq *rq = this_rq();
150 unsigned long cfs_max = rq->cpu_capacity_orig;
151
152 *util = min(rq->cfs.avg.util_avg, cfs_max);
153 *max = cfs_max;
154}
155
147static void sugov_update_single(struct update_util_data *hook, u64 time,
156static void sugov_update_single(struct update_util_data *hook, u64 time,
148 unsigned long util, unsigned long max)
157 unsigned int flags)
149{
150 struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
151 struct sugov_policy *sg_policy = sg_cpu->sg_policy;
152 struct cpufreq_policy *policy = sg_policy->policy;
158{
159 struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
160 struct sugov_policy *sg_policy = sg_cpu->sg_policy;
161 struct cpufreq_policy *policy = sg_policy->policy;
162 unsigned long util, max;
153 unsigned int next_f;
154
155 if (!sugov_should_update_freq(sg_policy, time))
156 return;
157
163 unsigned int next_f;
164
165 if (!sugov_should_update_freq(sg_policy, time))
166 return;
167
158 next_f = util == ULONG_MAX ? policy->cpuinfo.max_freq :
159 get_next_freq(sg_cpu, util, max);
168 if (flags & SCHED_CPUFREQ_RT_DL) {
169 next_f = policy->cpuinfo.max_freq;
170 } else {
171 sugov_get_util(&util, &max);
172 next_f = get_next_freq(sg_cpu, util, max);
173 }
160 sugov_update_commit(sg_policy, time, next_f);
161}
162
163static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu,
174 sugov_update_commit(sg_policy, time, next_f);
175}
176
177static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu,
164 unsigned long util, unsigned long max)
178 unsigned long util, unsigned long max,
179 unsigned int flags)
165{
166 struct sugov_policy *sg_policy = sg_cpu->sg_policy;
167 struct cpufreq_policy *policy = sg_policy->policy;
168 unsigned int max_f = policy->cpuinfo.max_freq;
169 u64 last_freq_update_time = sg_policy->last_freq_update_time;
170 unsigned int j;
171
180{
181 struct sugov_policy *sg_policy = sg_cpu->sg_policy;
182 struct cpufreq_policy *policy = sg_policy->policy;
183 unsigned int max_f = policy->cpuinfo.max_freq;
184 u64 last_freq_update_time = sg_policy->last_freq_update_time;
185 unsigned int j;
186
172 if (util == ULONG_MAX)
187 if (flags & SCHED_CPUFREQ_RT_DL)
173 return max_f;
174
175 for_each_cpu(j, policy->cpus) {
176 struct sugov_cpu *j_sg_cpu;
177 unsigned long j_util, j_max;
178 s64 delta_ns;
179
180 if (j == smp_processor_id())

--- 6 unchanged lines hidden (view full) ---

187 * of the CPU utilization and the last frequency update is long
188 * enough, don't take the CPU into account as it probably is
189 * idle now.
190 */
191 delta_ns = last_freq_update_time - j_sg_cpu->last_update;
192 if (delta_ns > TICK_NSEC)
193 continue;
194
188 return max_f;
189
190 for_each_cpu(j, policy->cpus) {
191 struct sugov_cpu *j_sg_cpu;
192 unsigned long j_util, j_max;
193 s64 delta_ns;
194
195 if (j == smp_processor_id())

--- 6 unchanged lines hidden (view full) ---

202 * of the CPU utilization and the last frequency update is long
203 * enough, don't take the CPU into account as it probably is
204 * idle now.
205 */
206 delta_ns = last_freq_update_time - j_sg_cpu->last_update;
207 if (delta_ns > TICK_NSEC)
208 continue;
209
195 j_util = j_sg_cpu->util;
196 if (j_util == ULONG_MAX)
210 if (j_sg_cpu->flags & SCHED_CPUFREQ_RT_DL)
197 return max_f;
198
211 return max_f;
212
213 j_util = j_sg_cpu->util;
199 j_max = j_sg_cpu->max;
200 if (j_util * max > j_max * util) {
201 util = j_util;
202 max = j_max;
203 }
204 }
205
206 return get_next_freq(sg_cpu, util, max);
207}
208
209static void sugov_update_shared(struct update_util_data *hook, u64 time,
214 j_max = j_sg_cpu->max;
215 if (j_util * max > j_max * util) {
216 util = j_util;
217 max = j_max;
218 }
219 }
220
221 return get_next_freq(sg_cpu, util, max);
222}
223
224static void sugov_update_shared(struct update_util_data *hook, u64 time,
210 unsigned long util, unsigned long max)
225 unsigned int flags)
211{
212 struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
213 struct sugov_policy *sg_policy = sg_cpu->sg_policy;
226{
227 struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
228 struct sugov_policy *sg_policy = sg_cpu->sg_policy;
229 unsigned long util, max;
214 unsigned int next_f;
215
230 unsigned int next_f;
231
232 sugov_get_util(&util, &max);
233
216 raw_spin_lock(&sg_policy->update_lock);
217
218 sg_cpu->util = util;
219 sg_cpu->max = max;
234 raw_spin_lock(&sg_policy->update_lock);
235
236 sg_cpu->util = util;
237 sg_cpu->max = max;
238 sg_cpu->flags = flags;
220 sg_cpu->last_update = time;
221
222 if (sugov_should_update_freq(sg_policy, time)) {
239 sg_cpu->last_update = time;
240
241 if (sugov_should_update_freq(sg_policy, time)) {
223 next_f = sugov_next_freq_shared(sg_cpu, util, max);
242 next_f = sugov_next_freq_shared(sg_cpu, util, max, flags);
224 sugov_update_commit(sg_policy, time, next_f);
225 }
226
227 raw_spin_unlock(&sg_policy->update_lock);
228}
229
230static void sugov_work(struct work_struct *work)
231{

--- 207 unchanged lines hidden (view full) ---

439 sg_policy->work_in_progress = false;
440 sg_policy->need_freq_update = false;
441
442 for_each_cpu(cpu, policy->cpus) {
443 struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);
444
445 sg_cpu->sg_policy = sg_policy;
446 if (policy_is_shared(policy)) {
243 sugov_update_commit(sg_policy, time, next_f);
244 }
245
246 raw_spin_unlock(&sg_policy->update_lock);
247}
248
249static void sugov_work(struct work_struct *work)
250{

--- 207 unchanged lines hidden (view full) ---

458 sg_policy->work_in_progress = false;
459 sg_policy->need_freq_update = false;
460
461 for_each_cpu(cpu, policy->cpus) {
462 struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);
463
464 sg_cpu->sg_policy = sg_policy;
465 if (policy_is_shared(policy)) {
447 sg_cpu->util = ULONG_MAX;
466 sg_cpu->util = 0;
448 sg_cpu->max = 0;
467 sg_cpu->max = 0;
468 sg_cpu->flags = SCHED_CPUFREQ_RT;
449 sg_cpu->last_update = 0;
450 sg_cpu->cached_raw_freq = 0;
451 cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
452 sugov_update_shared);
453 } else {
454 cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
455 sugov_update_single);
456 }

--- 33 unchanged lines hidden (view full) ---

490 .owner = THIS_MODULE,
491 .init = sugov_init,
492 .exit = sugov_exit,
493 .start = sugov_start,
494 .stop = sugov_stop,
495 .limits = sugov_limits,
496};
497
469 sg_cpu->last_update = 0;
470 sg_cpu->cached_raw_freq = 0;
471 cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
472 sugov_update_shared);
473 } else {
474 cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
475 sugov_update_single);
476 }

--- 33 unchanged lines hidden (view full) ---

510 .owner = THIS_MODULE,
511 .init = sugov_init,
512 .exit = sugov_exit,
513 .start = sugov_start,
514 .stop = sugov_stop,
515 .limits = sugov_limits,
516};
517
498static int __init sugov_module_init(void)
499{
500 return cpufreq_register_governor(&schedutil_gov);
501}
502
503static void __exit sugov_module_exit(void)
504{
505 cpufreq_unregister_governor(&schedutil_gov);
506}
507
508MODULE_AUTHOR("Rafael J. Wysocki <rafael.j.wysocki@intel.com>");
509MODULE_DESCRIPTION("Utilization-based CPU frequency selection");
510MODULE_LICENSE("GPL");
511
#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL
/* Expose schedutil as the default governor when selected in Kconfig. */
struct cpufreq_governor *cpufreq_default_governor(void)
{
	return &schedutil_gov;
}

/*
 * The default governor must be available before regular module init
 * runs, so register it at fs_initcall time in that configuration.
 */
fs_initcall(sugov_module_init);
#else
module_init(sugov_module_init);
#endif
module_exit(sugov_module_exit);
524
525static int __init sugov_register(void)
526{
527 return cpufreq_register_governor(&schedutil_gov);
528}
529fs_initcall(sugov_register);