/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_ENERGY_MODEL_H
#define _LINUX_ENERGY_MODEL_H
#include <linux/cpumask.h>
#include <linux/device.h>
#include <linux/jump_label.h>
#include <linux/kobject.h>
#include <linux/kref.h>
#include <linux/rcupdate.h>
#include <linux/sched/cpufreq.h>
#include <linux/sched/topology.h>
#include <linux/types.h>

/**
 * struct em_perf_state - Performance state of a performance domain
 * @performance: CPU performance (capacity) at a given frequency
 * @frequency: The frequency in kHz, for consistency with CPUFreq
 * @power: The power consumed at this level (by 1 CPU or by a registered
 * device). It can be a total power: static and dynamic.
 * @cost: The cost coefficient associated with this level, used during
 * energy calculation. Equal to: power * max_frequency / frequency
 * @flags: see "em_perf_state flags" description below.
 */
struct em_perf_state {
	unsigned long performance;
	unsigned long frequency;
	unsigned long power;
	unsigned long cost;
	unsigned long flags;
};

/*
 * em_perf_state flags:
 *
 * EM_PERF_STATE_INEFFICIENT: The performance state is inefficient. There is
 * another performance state in this em_perf_domain with a higher frequency
 * but a lower or equal power cost. Such inefficient states are ignored when
 * using the em_pd_get_efficient_*() functions.
 */
#define EM_PERF_STATE_INEFFICIENT BIT(0)
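
/*
 * Illustrative example (not from the kernel sources): for a hypothetical
 * CPU with a maximum frequency of 2000000 kHz, the cost coefficients and
 * the inefficiency marking could look as follows (states in ascending
 * frequency order):
 *
 *	freq (kHz)	power (uW)	cost = power * max_freq / freq
 *	1000000		50000		100000
 *	1200000		80000		133333
 *	1500000		90000		120000
 *
 * The 1200000 kHz state has a higher cost (133333) than the higher-frequency
 * 1500000 kHz state (120000), so it would be flagged
 * EM_PERF_STATE_INEFFICIENT.
 */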

/**
 * struct em_perf_table - Performance states table
 * @rcu: RCU used for safe access and destruction
 * @kref: Reference counter to track the users
 * @state: List of performance states, in ascending order
 */
struct em_perf_table {
	struct rcu_head rcu;
	struct kref kref;
	struct em_perf_state state[];
};

/**
 * struct em_perf_domain - Performance domain
 * @em_table: Pointer to the runtime modifiable em_perf_table
 * @nr_perf_states: Number of performance states
 * @min_perf_state: Minimum allowed Performance State index
 * @max_perf_state: Maximum allowed Performance State index
 * @flags: See "em_perf_domain flags"
 * @cpus: Cpumask covering the CPUs of the domain. It is here
 * for performance reasons, to avoid potential cache
 * misses during energy calculations in the scheduler,
 * and it simplifies allocating/freeing that memory region.
 *
 * In the case of a CPU device, a "performance domain" represents a group of
 * CPUs whose performance is scaled together. All CPUs of a performance domain
 * must have the same micro-architecture. Performance domains often have
 * a 1-to-1 mapping with CPUFreq policies. In the case of other devices the
 * @cpus field is unused.
 */
struct em_perf_domain {
	struct em_perf_table __rcu *em_table;
	int nr_perf_states;
	int min_perf_state;
	int max_perf_state;
	unsigned long flags;
	unsigned long cpus[];
};

/*
 * em_perf_domain flags:
 *
 * EM_PERF_DOMAIN_MICROWATTS: The power values are in micro-Watts, as opposed
 * to some other, abstract scale.
 *
 * EM_PERF_DOMAIN_SKIP_INEFFICIENCIES: Skip inefficient states when estimating
 * energy consumption.
 *
 * EM_PERF_DOMAIN_ARTIFICIAL: The power values are artificial and might have
 * been created by a platform missing real power information.
 */
#define EM_PERF_DOMAIN_MICROWATTS BIT(0)
#define EM_PERF_DOMAIN_SKIP_INEFFICIENCIES BIT(1)
#define EM_PERF_DOMAIN_ARTIFICIAL BIT(2)

#define em_span_cpus(em) (to_cpumask((em)->cpus))
#define em_is_artificial(em) ((em)->flags & EM_PERF_DOMAIN_ARTIFICIAL)
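
/*
 * Illustrative sketch: iterating over the CPUs of a performance domain,
 * assuming 'pd' points to a valid CPU performance domain:
 *
 *	int cpu;
 *
 *	for_each_cpu(cpu, em_span_cpus(pd))
 *		pr_debug("CPU%d belongs to this perf domain\n", cpu);
 */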

#ifdef CONFIG_ENERGY_MODEL
/*
 * The max power value in micro-Watts. The 64 Watt limit is set as a safety
 * net to avoid overflowing multiplications on 32-bit platforms. The 32-bit
 * value limit for the total Perf Domain power implies a limit of at most
 * 64 CPUs in such a domain.
 */
#define EM_MAX_POWER (64000000) /* 64 Watts */
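
/*
 * Back-of-envelope check for the 64-CPU bound mentioned above (assuming
 * 32-bit unsigned arithmetic): U32_MAX / EM_MAX_POWER = 4294967295 /
 * 64000000 ~= 67, so at most 64 CPUs, each drawing up to EM_MAX_POWER,
 * keep the total domain power within 32 bits.
 */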

/*
 * To avoid possible energy estimation overflow on 32-bit machines, limit
 * the number of CPUs in a Perf Domain. 64-bit machines are safe, hence the
 * large number.
 */
#ifdef CONFIG_64BIT
#define EM_MAX_NUM_CPUS 4096
#else
#define EM_MAX_NUM_CPUS 16
#endif

struct em_data_callback {
	/**
	 * active_power() - Provide power at the next performance state of
	 * a device
	 * @dev : Device for which we do this operation (can be a CPU)
	 * @power : Active power at the performance state
	 * (modified)
	 * @freq : Frequency at the performance state in kHz
	 * (modified)
	 *
	 * active_power() must find the lowest performance state of 'dev' above
	 * 'freq' and update 'power' and 'freq' to the matching active power
	 * and frequency.
	 *
	 * In case of CPUs, the power is that of a single CPU in the domain,
	 * expressed in micro-Watts or an abstract scale. It is expected to
	 * fit in the [0, EM_MAX_POWER] range.
	 *
	 * Return 0 on success.
	 */
	int (*active_power)(struct device *dev, unsigned long *power,
			    unsigned long *freq);

	/**
	 * get_cost() - Provide the cost at the given performance state of
	 * a device
	 * @dev : Device for which we do this operation (can be a CPU)
	 * @freq : Frequency at the performance state in kHz
	 * @cost : The cost value for the performance state
	 * (modified)
	 *
	 * In case of CPUs, the cost is that of a single CPU in the domain.
	 * It is expected to fit in the [0, EM_MAX_POWER] range due to internal
	 * usage in EAS calculation.
	 *
	 * Return 0 on success, or an appropriate error value in case of
	 * failure.
	 */
	int (*get_cost)(struct device *dev, unsigned long freq,
			unsigned long *cost);
};
#define EM_SET_ACTIVE_POWER_CB(em_cb, cb) ((em_cb).active_power = cb)
#define EM_ADV_DATA_CB(_active_power_cb, _cost_cb)	\
	{ .active_power = _active_power_cb,		\
	  .get_cost = _cost_cb }
#define EM_DATA_CB(_active_power_cb) \
		EM_ADV_DATA_CB(_active_power_cb, NULL)
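
/*
 * Illustrative sketch (not from the kernel sources): a driver could wire up
 * an active_power() callback and register its performance domain roughly as
 * below. The foo_find_opp_ceil() helper and struct foo_opp are hypothetical
 * stand-ins for whatever OPP lookup the driver actually has; the returned
 * power is in micro-Watts and must fit in [0, EM_MAX_POWER].
 *
 *	static int foo_active_power(struct device *dev, unsigned long *power,
 *				    unsigned long *freq)
 *	{
 *		struct foo_opp *opp = foo_find_opp_ceil(dev, *freq);
 *
 *		if (!opp)
 *			return -EINVAL;
 *		*freq = opp->freq_khz;
 *		*power = opp->power_uw;
 *		return 0;
 *	}
 *
 *	static const struct em_data_callback em_cb = EM_DATA_CB(foo_active_power);
 *
 * At probe time, with 'cpus' describing the domain:
 *
 *	ret = em_dev_register_perf_domain(dev, nr_opps, &em_cb, cpus, true);
 */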

struct em_perf_domain *em_cpu_get(int cpu);
struct em_perf_domain *em_pd_get(struct device *dev);
int em_dev_update_perf_domain(struct device *dev,
			      struct em_perf_table *new_table);
int em_dev_register_perf_domain(struct device *dev, unsigned int nr_states,
				const struct em_data_callback *cb,
				const cpumask_t *cpus, bool microwatts);
int em_dev_register_pd_no_update(struct device *dev, unsigned int nr_states,
				 const struct em_data_callback *cb,
				 const cpumask_t *cpus, bool microwatts);
void em_dev_unregister_perf_domain(struct device *dev);
struct em_perf_table *em_table_alloc(struct em_perf_domain *pd);
void em_table_free(struct em_perf_table *table);
int em_dev_compute_costs(struct device *dev, struct em_perf_state *table,
			 int nr_states);
int em_dev_update_chip_binning(struct device *dev);
int em_update_performance_limits(struct em_perf_domain *pd,
				 unsigned long freq_min_khz, unsigned long freq_max_khz);
void em_adjust_cpu_capacity(unsigned int cpu);
void em_rebuild_sched_domains(void);
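
/*
 * Illustrative sketch: updating the EM table at runtime. A new table is
 * allocated, filled in, has its costs computed and is then swapped in by
 * em_dev_update_perf_domain(); the local reference is dropped afterwards
 * with em_table_free(). Error handling is elided.
 *
 *	struct em_perf_table *new_table;
 *	struct em_perf_state *ps;
 *	int i;
 *
 *	new_table = em_table_alloc(pd);
 *
 *	for (i = 0; i < pd->nr_perf_states; i++) {
 *		ps = &new_table->state[i];
 *		... fill ps->performance, ps->frequency, ps->power ...
 *	}
 *
 *	em_dev_compute_costs(dev, new_table->state, pd->nr_perf_states);
 *	em_dev_update_perf_domain(dev, new_table);
 *	em_table_free(new_table);
 */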

/**
 * em_pd_get_efficient_state() - Get an efficient performance state from the EM
 * @table: List of performance states, in ascending order
 * @pd: performance domain for which this must be done
 * @max_util: Max utilization to map with the EM
 *
 * It is called from the scheduler code quite frequently and as a consequence
 * does not implement any checks.
 *
 * Return: An efficient performance state id, high enough to meet @max_util
 * requirement.
 */
static inline int
em_pd_get_efficient_state(struct em_perf_state *table,
			  struct em_perf_domain *pd, unsigned long max_util)
{
	unsigned long pd_flags = pd->flags;
	int min_ps = pd->min_perf_state;
	int max_ps = pd->max_perf_state;
	struct em_perf_state *ps;
	int i;

	for (i = min_ps; i <= max_ps; i++) {
		ps = &table[i];
		if (ps->performance >= max_util) {
			if (pd_flags & EM_PERF_DOMAIN_SKIP_INEFFICIENCIES &&
			    ps->flags & EM_PERF_STATE_INEFFICIENT)
				continue;
			return i;
		}
	}

	return max_ps;
}

/**
 * em_cpu_energy() - Estimates the energy consumed by the CPUs of a
 * performance domain
 * @pd : performance domain for which energy has to be estimated
 * @max_util : highest utilization among CPUs of the domain
 * @sum_util : sum of the utilization of all CPUs in the domain
 * @allowed_cpu_cap : maximum allowed CPU capacity for the @pd, which
 * might reflect reduced frequency (due to thermal)
 *
 * This function must be used only for CPU devices. There is no validation
 * that the EM is of CPU type or that its cpumask is allocated. It is called
 * from the scheduler code quite frequently, which is why it performs no
 * checks.
 *
 * Return: the sum of the energy consumed by the CPUs of the domain assuming
 * a capacity state satisfying the max utilization of the domain.
 */
static inline unsigned long em_cpu_energy(struct em_perf_domain *pd,
				unsigned long max_util, unsigned long sum_util,
				unsigned long allowed_cpu_cap)
{
	struct em_perf_table *em_table;
	struct em_perf_state *ps;
	int i;

	WARN_ONCE(!rcu_read_lock_held(), "EM: rcu read lock needed\n");

	if (!sum_util)
		return 0;

	/*
	 * In order to predict the performance state, map the utilization of
	 * the most utilized CPU of the performance domain to a requested
	 * performance, like schedutil. Also take into account that the real
	 * performance might be set lower (due to thermal capping). Thus, clamp
	 * max utilization to the allowed CPU capacity before calculating
	 * effective performance.
	 */
	max_util = min(max_util, allowed_cpu_cap);

	/*
	 * Find the lowest performance state of the Energy Model above the
	 * requested performance.
	 */
	em_table = rcu_dereference(pd->em_table);
	i = em_pd_get_efficient_state(em_table->state, pd, max_util);
	ps = &em_table->state[i];

	/*
	 * The performance (capacity) of a CPU in the domain at the performance
	 * state (ps) can be computed as:
	 *
	 *                       ps->freq * scale_cpu
	 *   ps->performance = ------------------------                 (1)
	 *                          cpu_max_freq
	 *
	 * So, ignoring the costs of idle states (which are not available in
	 * the EM), the energy consumed by this CPU at that performance state
	 * is estimated as:
	 *
	 *               ps->power * cpu_util
	 *   cpu_nrg = ------------------------                         (2)
	 *                 ps->performance
	 *
	 * since 'cpu_util / ps->performance' represents its percentage of busy
	 * time.
	 *
	 *   NOTE: Although the result of this computation actually is in
	 *         units of power, it can be manipulated as an energy value
	 *         over a scheduling period, since it is assumed to be
	 *         constant during that interval.
	 *
	 * By injecting (1) in (2), 'cpu_nrg' can be re-expressed as a product
	 * of two terms:
	 *
	 *             ps->power * cpu_max_freq
	 *   cpu_nrg = ------------------------ * cpu_util              (3)
	 *               ps->freq * scale_cpu
	 *
	 * The first term is static, and is stored in the em_perf_state struct
	 * as 'ps->cost'.
	 *
	 * Since all CPUs of the domain have the same micro-architecture, they
	 * share the same 'ps->cost', and the same CPU capacity. Hence, the
	 * total energy of the domain (which is the simple sum of the energy of
	 * all of its CPUs) can be factorized as:
	 *
	 *   pd_nrg = ps->cost * \Sum cpu_util                          (4)
	 */
	return ps->cost * sum_util;
}
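
/*
 * Illustrative sketch: estimating domain energy from scheduler-like context.
 * The RCU read lock must be held across the call, since the EM table is
 * RCU-protected (see the WARN_ONCE() above):
 *
 *	unsigned long energy;
 *
 *	rcu_read_lock();
 *	energy = em_cpu_energy(pd, max_util, sum_util, allowed_cpu_cap);
 *	rcu_read_unlock();
 */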

/**
 * em_pd_nr_perf_states() - Get the number of performance states of a perf.
 * domain
 * @pd : performance domain for which this must be done
 *
 * Return: the number of performance states in the performance domain table
 */
static inline int em_pd_nr_perf_states(struct em_perf_domain *pd)
{
	return pd->nr_perf_states;
}

/**
 * em_perf_state_from_pd() - Get the performance states table of perf.
 * domain
 * @pd : performance domain for which this must be done
 *
 * To use this function, rcu_read_lock() must be held. After the usage
 * of the performance states table is finished, rcu_read_unlock() should
 * be called.
 *
 * Return: the pointer to performance states table of the performance domain
 */
static inline
struct em_perf_state *em_perf_state_from_pd(struct em_perf_domain *pd)
{
	return rcu_dereference(pd->em_table)->state;
}
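
/*
 * Illustrative sketch: walking the performance states table under the RCU
 * read lock:
 *
 *	struct em_perf_state *table;
 *	int i;
 *
 *	rcu_read_lock();
 *	table = em_perf_state_from_pd(pd);
 *	for (i = 0; i < pd->nr_perf_states; i++)
 *		pr_debug("freq=%lu kHz power=%lu\n",
 *			 table[i].frequency, table[i].power);
 *	rcu_read_unlock();
 */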

#else
struct em_data_callback {};
#define EM_ADV_DATA_CB(_active_power_cb, _cost_cb) { }
#define EM_DATA_CB(_active_power_cb) { }
#define EM_SET_ACTIVE_POWER_CB(em_cb, cb) do { } while (0)

static inline
int em_dev_register_perf_domain(struct device *dev, unsigned int nr_states,
				const struct em_data_callback *cb,
				const cpumask_t *cpus, bool microwatts)
{
	return -EINVAL;
}
static inline
int em_dev_register_pd_no_update(struct device *dev, unsigned int nr_states,
				 const struct em_data_callback *cb,
				 const cpumask_t *cpus, bool microwatts)
{
	return -EINVAL;
}
static inline void em_dev_unregister_perf_domain(struct device *dev)
{
}
static inline struct em_perf_domain *em_cpu_get(int cpu)
{
	return NULL;
}
static inline struct em_perf_domain *em_pd_get(struct device *dev)
{
	return NULL;
}
static inline unsigned long em_cpu_energy(struct em_perf_domain *pd,
				unsigned long max_util, unsigned long sum_util,
				unsigned long allowed_cpu_cap)
{
	return 0;
}
static inline int em_pd_nr_perf_states(struct em_perf_domain *pd)
{
	return 0;
}
static inline
struct em_perf_table *em_table_alloc(struct em_perf_domain *pd)
{
	return NULL;
}
static inline void em_table_free(struct em_perf_table *table) {}
static inline
int em_dev_update_perf_domain(struct device *dev,
			      struct em_perf_table *new_table)
{
	return -EINVAL;
}
static inline
struct em_perf_state *em_perf_state_from_pd(struct em_perf_domain *pd)
{
	return NULL;
}
static inline
int em_dev_compute_costs(struct device *dev, struct em_perf_state *table,
			 int nr_states)
{
	return -EINVAL;
}
static inline int em_dev_update_chip_binning(struct device *dev)
{
	return -EINVAL;
}
static inline
int em_update_performance_limits(struct em_perf_domain *pd,
				 unsigned long freq_min_khz, unsigned long freq_max_khz)
{
	return -EINVAL;
}
static inline void em_adjust_cpu_capacity(unsigned int cpu) {}
static inline void em_rebuild_sched_domains(void) {}
#endif

#endif