xref: /linux/kernel/sched/pelt.c (revision bf76f23aa1c178e9115eba17f699fa726aed669b)
// SPDX-License-Identifier: GPL-2.0
/*
 * Per Entity Load Tracking (PELT)
 *
 *  Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 *  Interactivity improvements by Mike Galbraith
 *  (C) 2007 Mike Galbraith <efault@gmx.de>
 *
 *  Various enhancements by Dmitry Adamushko.
 *  (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
 *
 *  Group scheduling enhancements by Srivatsa Vaddagiri
 *  Copyright IBM Corporation, 2007
 *  Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
 *
 *  Scaled math optimizations by Thomas Gleixner
 *  Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
 *
 *  Adaptive scheduling granularity, math enhancements by Peter Zijlstra
 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 *
 *  Move PELT related code from fair.c into this pelt.c file
 *  Author: Vincent Guittot <vincent.guittot@linaro.org>
 */
#include "pelt.h"

/*
 * Approximate:
 *   val * y^n,    where y^32 ~= 0.5 (~1 scheduling period)
 */
static u64 decay_load(u64 val, u64 n)
{
	unsigned int local_n;

	if (unlikely(n > LOAD_AVG_PERIOD * 63))
		return 0;

	/* after bounds checking we can collapse to 32-bit */
	local_n = n;

	/*
	 * As y^PERIOD = 1/2, we can combine
	 *    y^n = 1/2^(n/PERIOD) * y^(n%PERIOD)
	 * With a look-up table which covers y^n (n<PERIOD)
	 *
	 * To achieve constant time decay_load.
	 */
	if (unlikely(local_n >= LOAD_AVG_PERIOD)) {
		val >>= local_n / LOAD_AVG_PERIOD;
		local_n %= LOAD_AVG_PERIOD;
	}

	val = mul_u64_u32_shr(val, runnable_avg_yN_inv[local_n], 32);
	return val;
}

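/*
 * Illustrative sketch, not part of the kernel sources: a floating point
 * reference model of the decay above, assuming LOAD_AVG_PERIOD == 32 so
 * that y = 0.5^(1/32). decay_load() should match it to within the rounding
 * of the 32-bit fixed point table, e.g. decay_load(1024, 32) ~= 512 and
 * decay_load(1024, 64) ~= 256.
 *
 *	static double decay_load_ref(double val, unsigned int n)
 *	{
 *		return val * pow(0.5, (double)n / 32.0);	// val * y^n
 *	}
 */
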
static u32 __accumulate_pelt_segments(u64 periods, u32 d1, u32 d3)
{
	u32 c1, c2, c3 = d3; /* y^0 == 1 */

	/*
	 * c1 = d1 y^p
	 */
	c1 = decay_load((u64)d1, periods);

	/*
	 *            p-1
	 * c2 = 1024 \Sum y^n
	 *            n=1
	 *
	 *              inf        inf
	 *    = 1024 ( \Sum y^n - \Sum y^n - y^0 )
	 *              n=0        n=p
	 */
	c2 = LOAD_AVG_MAX - decay_load(LOAD_AVG_MAX, periods) - 1024;

	return c1 + c2 + c3;
}

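/*
 * Worked example (illustrative, assuming LOAD_AVG_MAX == 47742 and
 * y = 0.5^(1/32) ~= 0.9786):
 *
 *   __accumulate_pelt_segments(1, d1, d3)
 *     c1 = d1 * y    ~= 0.9786 * d1
 *     c2 ~= 0				(no full period between d1 and d3)
 *     c3 = d3
 *
 *   __accumulate_pelt_segments(2, d1, d3)
 *     c1 = d1 * y^2  ~= 0.9576 * d1
 *     c2 ~= 1024 * y ~= 1002		(one full, already decayed period)
 *     c3 = d3
 */
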
/*
 * Accumulate the three separate parts of the sum; d1 the remainder
 * of the last (incomplete) period, d2 the span of full periods and d3
 * the remainder of the (incomplete) current period.
 *
 *           d1          d2           d3
 *           ^           ^            ^
 *           |           |            |
 *         |<->|<----------------->|<--->|
 * ... |---x---|------| ... |------|-----x (now)
 *
 *                           p-1
 * u' = (u + d1) y^p + 1024 \Sum y^n + d3 y^0
 *                           n=1
 *
 *    = u y^p +					(Step 1)
 *
 *                     p-1
 *      d1 y^p + 1024 \Sum y^n + d3 y^0		(Step 2)
 *                     n=1
 */
static __always_inline u32
accumulate_sum(u64 delta, struct sched_avg *sa,
	       unsigned long load, unsigned long runnable, int running)
{
	u32 contrib = (u32)delta; /* p == 0 -> delta < 1024 */
	u64 periods;

	delta += sa->period_contrib;
	periods = delta / 1024; /* A period is 1024us (~1ms) */

	/*
	 * Step 1: decay old *_sum if we crossed period boundaries.
	 */
	if (periods) {
		sa->load_sum = decay_load(sa->load_sum, periods);
		sa->runnable_sum =
			decay_load(sa->runnable_sum, periods);
		sa->util_sum = decay_load((u64)(sa->util_sum), periods);

		/*
		 * Step 2
		 */
		delta %= 1024;
		if (load) {
			/*
			 * This relies on the:
			 *
			 * if (!load)
			 *	runnable = running = 0;
			 *
			 * clause from ___update_load_sum(); this results in
			 * the below usage of @contrib disappearing entirely,
			 * so there is no point in calculating it.
			 */
			contrib = __accumulate_pelt_segments(periods,
					1024 - sa->period_contrib, delta);
		}
	}
	sa->period_contrib = delta;

	if (load)
		sa->load_sum += load * contrib;
	if (runnable)
		sa->runnable_sum += runnable * contrib << SCHED_CAPACITY_SHIFT;
	if (running)
		sa->util_sum += contrib << SCHED_CAPACITY_SHIFT;

	return periods;
}

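/*
 * Worked example (illustrative): with sa->period_contrib == 512 and an
 * incoming delta of 2200us, accumulate_sum() sees 2712us in total:
 *
 *   periods = 2712 / 1024 = 2	(two period boundaries crossed)
 *   d1 = 1024 - 512 = 512	(tail of the old, partially filled period)
 *   d3 = 2712 % 1024 = 664	(head of the new, current period)
 *
 * The old *_sum values are decayed by y^2 and contrib is then built from
 * d1, one full intermediate period, and d3 as described above; 664 is
 * stored back in sa->period_contrib for the next update.
 */
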
/*
 * We can represent the historical contribution to runnable average as the
 * coefficients of a geometric series.  To do this we sub-divide our runnable
 * history into segments of approximately 1ms (1024us); label the segment that
 * occurred N-ms ago p_N, with p_0 corresponding to the current period, e.g.
 *
 * [<- 1024us ->|<- 1024us ->|<- 1024us ->| ...
 *      p0            p1           p2
 *     (now)       (~1ms ago)  (~2ms ago)
 *
 * Let u_i denote the fraction of p_i that the entity was runnable.
 *
 * We then designate the fractions u_i as our co-efficients, yielding the
 * following representation of historical load:
 *   u_0 + u_1*y + u_2*y^2 + u_3*y^3 + ...
 *
 * We choose y based on the width of a reasonable scheduling period, fixing:
 *   y^32 = 0.5
 *
 * This means that the contribution to load ~32ms ago (u_32) will be weighted
 * approximately half as much as the contribution to load within the last ms
 * (u_0).
 *
 * When a period "rolls over" and we have new u_0`, multiplying the previous
 * sum again by y is sufficient to update:
 *   load_avg = u_0` + y*(u_0 + u_1*y + u_2*y^2 + ... )
 *            = u_0 + u_1*y + u_2*y^2 + ... [re-labeling u_i --> u_{i+1}]
 */
static __always_inline int
___update_load_sum(u64 now, struct sched_avg *sa,
		  unsigned long load, unsigned long runnable, int running)
{
	u64 delta;

	delta = now - sa->last_update_time;
	/*
	 * This should only happen when time goes backwards, which it
	 * unfortunately does during sched clock init when we swap over to TSC.
	 */
	if ((s64)delta < 0) {
		sa->last_update_time = now;
		return 0;
	}

	/*
	 * Use 1024ns as the unit of measurement since it's a reasonable
	 * approximation of 1us and fast to compute.
	 */
	delta >>= 10;
	if (!delta)
		return 0;

	sa->last_update_time += delta << 10;

	/*
	 * running is a subset of runnable (weight) so running can't be set if
	 * runnable is clear. But there are some corner cases where the current
	 * se has already been dequeued while cfs_rq->curr still points to it.
	 * In that case weight will be 0 while running is still set, not only
	 * for a sched_entity but also for a cfs_rq if the latter becomes idle.
	 * As an example, this happens during sched_balance_newidle() which
	 * calls sched_balance_update_blocked_averages().
	 *
	 * Also see the comment in accumulate_sum().
	 */
	if (!load)
		runnable = running = 0;

	/*
	 * Now we know we crossed measurement unit boundaries. The *_avg
	 * accrues by two steps:
	 *
	 * Step 1: accumulate *_sum since last_update_time. If we haven't
	 * crossed period boundaries, finish.
	 */
	if (!accumulate_sum(delta, sa, load, runnable, running))
		return 0;

	return 1;
}

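/*
 * Illustrative example: if now - sa->last_update_time == 2250000ns, then
 * delta >>= 10 yields 2197 "microsecond-ish" units and last_update_time
 * advances by 2197 << 10 == 2249728ns; the 272ns remainder is simply left
 * for the next update rather than lost.
 */
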
/*
 * When syncing *_avg with *_sum, we must take into account the current
 * position in the PELT segment otherwise the remaining part of the segment
 * will be considered as idle time whereas it's not yet elapsed and this will
 * generate unwanted oscillation in the range [1002..1024[.
 *
 * The max value of *_sum varies with the position in the time segment and is
 * equal to:
 *
 *   LOAD_AVG_MAX*y + sa->period_contrib
 *
 * which can be simplified into:
 *
 *   LOAD_AVG_MAX - 1024 + sa->period_contrib
 *
 * because LOAD_AVG_MAX*y == LOAD_AVG_MAX-1024
 *
 * The same care must be taken when a sched entity is added, updated or
 * removed from a cfs_rq and we need to update sched_avg. Scheduler entities
 * and the cfs rq, to which they are attached, have the same position in the
 * time segment because they use the same clock. This means that we can use
 * the period_contrib of cfs_rq when updating the sched_avg of a sched_entity
 * if it's more convenient.
 */
static __always_inline void
___update_load_avg(struct sched_avg *sa, unsigned long load)
{
	u32 divider = get_pelt_divider(sa);

	/*
	 * Step 2: update *_avg.
	 */
	sa->load_avg = div_u64(load * sa->load_sum, divider);
	sa->runnable_avg = div_u64(sa->runnable_sum, divider);
	WRITE_ONCE(sa->util_avg, sa->util_sum / divider);
}

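/*
 * Illustrative example, assuming LOAD_AVG_MAX == 47742 and
 * SCHED_CAPACITY_SCALE == 1024: for an entity that has been continuously
 * running, util_sum saturates around
 *
 *   (LOAD_AVG_MAX - 1024 + period_contrib) << SCHED_CAPACITY_SHIFT
 *
 * while the divider above is LOAD_AVG_MAX - 1024 + period_contrib, so
 * util_avg converges to ~1024 (100% of the CPU) regardless of where in the
 * 1024us segment the sync happens, which is exactly the point of including
 * period_contrib in the divider.
 */
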
/*
 * sched_entity:
 *
 *   task:
 *     se_weight()   = se->load.weight
 *     se_runnable() = !!on_rq
 *
 *   group: [ see update_cfs_group() ]
 *     se_weight()   = tg->weight * grq->load_avg / tg->load_avg
 *     se_runnable() = grq->h_nr_runnable
 *
 *   runnable_sum = se_runnable() * runnable = grq->runnable_sum
 *   runnable_avg = runnable_sum
 *
 *   load_sum := runnable
 *   load_avg = se_weight(se) * load_sum
 *
 * cfs_rq:
 *
 *   runnable_sum = \Sum se->avg.runnable_sum
 *   runnable_avg = \Sum se->avg.runnable_avg
 *
 *   load_sum = \Sum se_weight(se) * se->avg.load_sum
 *   load_avg = \Sum se->avg.load_avg
 */

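/*
 * Illustrative example: a nice-0 task (se_weight() == 1024) that is
 * runnable roughly half of the time settles at load_sum ~= divider / 2 and
 * hence load_avg ~= 512; two such tasks queued on the same cfs_rq give a
 * cfs_rq load_avg of ~1024, since the cfs_rq metrics are the sums of the
 * per-entity ones shown above.
 */
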
int __update_load_avg_blocked_se(u64 now, struct sched_entity *se)
{
	if (___update_load_sum(now, &se->avg, 0, 0, 0)) {
		___update_load_avg(&se->avg, se_weight(se));
		trace_pelt_se_tp(se);
		return 1;
	}

	return 0;
}

int __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	if (___update_load_sum(now, &se->avg, !!se->on_rq, se_runnable(se),
				cfs_rq->curr == se)) {

		___update_load_avg(&se->avg, se_weight(se));
		cfs_se_util_change(&se->avg);
		trace_pelt_se_tp(se);
		return 1;
	}

	return 0;
}

int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq)
{
	if (___update_load_sum(now, &cfs_rq->avg,
				scale_load_down(cfs_rq->load.weight),
				cfs_rq->h_nr_runnable,
				cfs_rq->curr != NULL)) {

		___update_load_avg(&cfs_rq->avg, 1);
		trace_pelt_cfs_tp(cfs_rq);
		return 1;
	}

	return 0;
}

/*
 * rt_rq:
 *
 *   util_sum = \Sum se->avg.util_sum but se->avg.util_sum is not tracked
 *   util_sum = cpu_scale * load_sum
 *   runnable_sum = util_sum
 *
 *   load_avg and runnable_avg are not supported and meaningless.
 *
 */

int update_rt_rq_load_avg(u64 now, struct rq *rq, int running)
{
	if (___update_load_sum(now, &rq->avg_rt,
				running,
				running,
				running)) {

		___update_load_avg(&rq->avg_rt, 1);
		trace_pelt_rt_tp(rq);
		return 1;
	}

	return 0;
}

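/*
 * Illustrative example: update_rt_rq_load_avg() is driven with running == 1
 * whenever an RT task is on the CPU and 0 otherwise, so a CPU spending a
 * steady ~25% of its time in RT tasks sees rq->avg_rt.util_avg converge to
 * roughly 256 out of SCHED_CAPACITY_SCALE (1024). The same scheme is used
 * for the deadline class below.
 */
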
/*
 * dl_rq:
 *
 *   util_sum = \Sum se->avg.util_sum but se->avg.util_sum is not tracked
 *   util_sum = cpu_scale * load_sum
 *   runnable_sum = util_sum
 *
 *   load_avg and runnable_avg are not supported and meaningless.
 *
 */

int update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
{
	if (___update_load_sum(now, &rq->avg_dl,
				running,
				running,
				running)) {

		___update_load_avg(&rq->avg_dl, 1);
		trace_pelt_dl_tp(rq);
		return 1;
	}

	return 0;
}

#ifdef CONFIG_SCHED_HW_PRESSURE
/*
 * hardware:
 *
 *   load_sum = \Sum se->avg.load_sum but se->avg.load_sum is not tracked
 *
 *   util_avg and runnable_load_avg are not supported and meaningless.
 *
 * Unlike rt/dl utilization tracking, which tracks the time spent by a CPU
 * running an rt/dl task through util_avg, the average HW pressure is
 * tracked through load_avg. This is because the HW pressure signal is a
 * time-weighted "delta" capacity, unlike util_avg which is binary.
 * "delta capacity" =  actual capacity  -
 *			capped capacity of a CPU due to a HW event.
 */

int update_hw_load_avg(u64 now, struct rq *rq, u64 capacity)
{
	if (___update_load_sum(now, &rq->avg_hw,
			       capacity,
			       capacity,
			       capacity)) {
		___update_load_avg(&rq->avg_hw, 1);
		trace_pelt_hw_tp(rq);
		return 1;
	}

	return 0;
}
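
/*
 * Illustrative example: if thermal throttling caps a CPU whose original
 * capacity is 1024 down to 768, the pressure reported for that CPU is
 * ~256; feeding that value in as @capacity above makes rq->avg_hw.load_avg
 * a time-weighted average of this capped-off capacity, decaying back
 * toward 0 once the cap is lifted.
 */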
#endif /* CONFIG_SCHED_HW_PRESSURE */

#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
/*
 * IRQ:
 *
 *   util_sum = \Sum se->avg.util_sum but se->avg.util_sum is not tracked
 *   util_sum = cpu_scale * load_sum
 *   runnable_sum = util_sum
 *
 *   load_avg and runnable_avg are not supported and meaningless.
 *
 */

int update_irq_load_avg(struct rq *rq, u64 running)
{
	int ret = 0;

	/*
	 * We can't use clock_pelt because IRQ time is not accounted in
	 * clock_task. Instead we directly scale the running time to
	 * reflect the real amount of computation.
	 */
	running = cap_scale(running, arch_scale_freq_capacity(cpu_of(rq)));
	running = cap_scale(running, arch_scale_cpu_capacity(cpu_of(rq)));

	/*
	 * We know how much time has been used by interrupts since the last
	 * update, but we don't know when. Let's be pessimistic and assume
	 * the interrupts happened just before this update. This is not far
	 * from reality because an interrupt will most probably wake up a
	 * task and trigger an update of the rq clock, during which the
	 * metric is updated.
	 * We start by decaying with normal context time and then we add the
	 * interrupt context time.
	 * We can safely remove running from rq->clock because
	 * rq->clock += delta with delta >= running.
	 */
	ret = ___update_load_sum(rq->clock - running, &rq->avg_irq,
				0,
				0,
				0);
	ret += ___update_load_sum(rq->clock, &rq->avg_irq,
				1,
				1,
				1);

	if (ret) {
		___update_load_avg(&rq->avg_irq, 1);
		trace_pelt_irq_tp(rq);
	}

	return ret;
}
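
/*
 * Illustrative example: if rq->clock advanced by 1000us since the last
 * update of rq->avg_irq and 200us of that was spent in interrupts
 * (@running == 200us, already frequency/capacity scaled above), the first
 * ___update_load_sum() call decays the signal across the 800us of non-IRQ
 * time and the second accumulates the trailing 200us as fully busy.
 */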
#endif /* CONFIG_HAVE_SCHED_AVG_IRQ */

/*
 * Load avg and utilization metrics need to be updated periodically and before
 * consumption. This function updates the metrics for all subsystems except for
 * the fair class. @rq must be locked and have its clock updated.
 */
bool update_other_load_avgs(struct rq *rq)
{
	u64 now = rq_clock_pelt(rq);
	const struct sched_class *curr_class = rq->donor->sched_class;
	unsigned long hw_pressure = arch_scale_hw_pressure(cpu_of(rq));

	lockdep_assert_rq_held(rq);

	/* hw_pressure doesn't care about invariance */
	return update_rt_rq_load_avg(now, rq, curr_class == &rt_sched_class) |
		update_dl_rq_load_avg(now, rq, curr_class == &dl_sched_class) |
		update_hw_load_avg(rq_clock_task(rq), rq, hw_pressure) |
		update_irq_load_avg(rq, 0);
}
491